Lines Matching full:ctrl

124 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
126 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
129 void nvme_queue_scan(struct nvme_ctrl *ctrl)
134 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
135 queue_work(nvme_wq, &ctrl->scan_work);
144 int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
146 if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
148 if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
156 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
159 if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
162 set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
163 dev_info(ctrl->device, "failfast expired\n");
164 nvme_kick_requeue_lists(ctrl);
167 static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
169 if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
172 schedule_delayed_work(&ctrl->failfast_work,
173 ctrl->opts->fast_io_fail_tmo * HZ);
176 static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
178 if (!ctrl->opts)
181 cancel_delayed_work_sync(&ctrl->failfast_work);
182 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
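
nvme_start_failfast_work() arms a delayed work item fast_io_fail_tmo seconds in the future (an opts value of -1, checked at 169, means "never fail fast"), and nvme_stop_failfast_work() cancels it and clears the FAILFAST_EXPIRED flag. A standalone C sketch of the jiffies conversion at 172-173, with an illustrative HZ value (the kernel's HZ is a build-time constant):

	#include <stdio.h>

	#define HZ 250		/* illustrative; real HZ is fixed at kernel build time */

	/* Delay in jiffies for schedule_delayed_work(), or -1 when
	 * fail-fast is disabled (fast_io_fail_tmo == -1). */
	static long failfast_delay(int fast_io_fail_tmo)
	{
		if (fast_io_fail_tmo == -1)
			return -1;
		return (long)fast_io_fail_tmo * HZ;
	}

	int main(void)
	{
		printf("%ld\n", failfast_delay(30));	/* 30 s -> 7500 jiffies at HZ=250 */
		return 0;
	}
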
186 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
188 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
190 if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
196 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
200 ret = nvme_reset_ctrl(ctrl);
202 flush_work(&ctrl->reset_work);
203 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
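
nvme_reset_ctrl() only queues reset_work after the state machine accepts the transition to RESETTING, and nvme_reset_ctrl_sync() additionally flushes the work and checks that the controller came back LIVE. A minimal single-threaded model of that gate/queue/flush/verify flow, with hypothetical types standing in for the kernel's (no locking or workqueue, unlike the real driver):

	#include <stdio.h>
	#include <stdbool.h>

	enum ctrl_state { CTRL_LIVE, CTRL_RESETTING };

	struct ctrl {
		enum ctrl_state state;
		bool work_queued;
	};

	static int reset_ctrl(struct ctrl *c)
	{
		if (c->state != CTRL_LIVE)	/* nvme_change_ctrl_state() refused */
			return -1;
		c->state = CTRL_RESETTING;
		if (c->work_queued)		/* queue_work() returned false */
			return -1;
		c->work_queued = true;
		return 0;
	}

	static void flush_reset_work(struct ctrl *c)	/* flush_work() stand-in */
	{
		if (c->work_queued) {
			c->work_queued = false;
			c->state = CTRL_LIVE;	/* a successful reset ends LIVE */
		}
	}

	static int reset_ctrl_sync(struct ctrl *c)
	{
		int ret = reset_ctrl(c);

		if (ret)
			return ret;
		flush_reset_work(c);
		return c->state == CTRL_LIVE ? 0 : -1;
	}

	int main(void)
	{
		struct ctrl c = { .state = CTRL_LIVE, .work_queued = false };

		printf("sync reset: %d\n", reset_ctrl_sync(&c));	/* 0 */
		return 0;
	}
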
210 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
212 dev_info(ctrl->device,
213 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
215 flush_work(&ctrl->reset_work);
216 nvme_stop_ctrl(ctrl);
217 nvme_remove_namespaces(ctrl);
218 ctrl->ops->delete_ctrl(ctrl);
219 nvme_uninit_ctrl(ctrl);
224 struct nvme_ctrl *ctrl =
227 nvme_do_delete_ctrl(ctrl);
230 int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
232 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
234 if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
240 void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
246 nvme_get_ctrl(ctrl);
247 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
248 nvme_do_delete_ctrl(ctrl);
249 nvme_put_ctrl(ctrl);
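
nvme_delete_ctrl_sync() takes its own reference before attempting the DELETING transition so the controller cannot be freed underneath nvme_do_delete_ctrl(), then drops it when the teardown returns. A hypothetical refcount model of that get/do/put bracket (the kernel uses kref underneath nvme_get_ctrl()/nvme_put_ctrl()):

	#include <stdio.h>

	struct ctrl { int refs; };

	static void get_ctrl(struct ctrl *c) { c->refs++; }

	static void put_ctrl(struct ctrl *c)
	{
		if (--c->refs == 0)
			printf("last reference dropped; ctrl freed\n");
	}

	static void delete_ctrl_sync(struct ctrl *c)
	{
		get_ctrl(c);		/* pin the ctrl across the teardown */
		/* ... nvme_do_delete_ctrl() runs here ... */
		put_ctrl(c);
	}

	int main(void)
	{
		struct ctrl c = { .refs = 1 };

		delete_ctrl_sync(&c);
		put_ctrl(&c);		/* owner's original reference; frees here */
		return 0;
	}
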
302 delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
330 dev_name(nr->ctrl->device),
395 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
408 if (ctrl->kas &&
409 req->deadline - req->timeout >= ctrl->ka_last_check_time)
410 ctrl->comp_seen = true;
424 queue_work(nvme_wq, &ctrl->dhchap_auth_work);
473 void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
475 if (ctrl->tagset) {
476 blk_mq_tagset_busy_iter(ctrl->tagset,
477 nvme_cancel_request, ctrl);
478 blk_mq_tagset_wait_completed_request(ctrl->tagset);
483 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
485 if (ctrl->admin_tagset) {
486 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
487 nvme_cancel_request, ctrl);
488 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
493 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
500 spin_lock_irqsave(&ctrl->lock, flags);
502 old_state = nvme_ctrl_state(ctrl);
568 WRITE_ONCE(ctrl->state, new_state);
569 wake_up_all(&ctrl->state_wq);
572 spin_unlock_irqrestore(&ctrl->lock, flags);
578 nvme_stop_failfast_work(ctrl);
579 nvme_kick_requeue_lists(ctrl);
582 nvme_start_failfast_work(ctrl);
592 bool nvme_wait_reset(struct nvme_ctrl *ctrl)
594 wait_event(ctrl->state_wq,
595 nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
596 nvme_state_terminal(ctrl));
597 return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
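
nvme_change_ctrl_state() validates each old/new state pair under ctrl->lock and, on success, publishes the new state and wakes state_wq; nvme_wait_reset() then sleeps until the controller either becomes RESETTING or reaches a terminal state. A reduced standalone model of such a transition gate (state names mirror the driver's, but this arc set is illustrative, not the kernel's full table):

	#include <stdio.h>
	#include <stdbool.h>

	enum state { ST_LIVE, ST_RESETTING, ST_DELETING, ST_DEAD };

	static bool transition_ok(enum state old, enum state next)
	{
		switch (next) {
		case ST_RESETTING:
			return old == ST_LIVE;
		case ST_DELETING:
			return old == ST_LIVE || old == ST_RESETTING;
		case ST_DEAD:
			return old == ST_DELETING;
		default:
			return false;
		}
	}

	int main(void)
	{
		printf("%d\n", transition_ok(ST_LIVE, ST_RESETTING));	/* 1 */
		printf("%d\n", transition_ok(ST_DEAD, ST_RESETTING));	/* 0 */
		return 0;
	}
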
629 nvme_put_ctrl(ns->ctrl);
681 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
684 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
689 !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
696 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
709 if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
712 if (ctrl->ops->flags & NVME_F_FABRICS) {
718 switch (nvme_ctrl_state(ctrl)) {
766 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
769 range = page_address(ns->ctrl->discard_page);
795 if (virt_to_page(range) == ns->ctrl->discard_page)
796 clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
843 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
933 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
935 if (req->special_vec.bv_page == ctrl->discard_page)
936 clear_bit_unlock(0, &ctrl->discard_page_busy);
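
The discard path keeps one preallocated page per controller guarded by a busy bit: test_and_set_bit_lock() at 766 claims it without allocating in the I/O path, and the completion side (795-796, 935-936) either clears the bit or frees a fallback allocation. A userspace model of that fast-path/fallback pairing (buffer size and names are illustrative):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	/* One static buffer plus a busy flag stands in for
	 * ctrl->discard_page and ctrl->discard_page_busy. */
	static char discard_page[4096];
	static bool discard_page_busy;

	static void *get_range_buf(void)
	{
		if (!discard_page_busy) {	/* test_and_set_bit_lock() stand-in */
			discard_page_busy = true;
			return discard_page;
		}
		return malloc(sizeof(discard_page));	/* slow path: allocate */
	}

	static void put_range_buf(void *p)
	{
		if (p == (void *)discard_page)
			discard_page_busy = false;	/* clear_bit_unlock() stand-in */
		else
			free(p);
	}

	int main(void)
	{
		void *a = get_range_buf();
		void *b = get_range_buf();

		printf("a preallocated: %d\n", a == (void *)discard_page);	/* 1 */
		printf("b preallocated: %d\n", b == (void *)discard_page);	/* 0 */
		put_range_buf(b);
		put_range_buf(a);
		return 0;
	}
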
1062 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
1069 dev_warn_once(ctrl->device,
1080 effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1087 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
1089 u32 effects = nvme_command_effects(ctrl, ns, opcode);
1096 mutex_lock(&ctrl->scan_lock);
1097 mutex_lock(&ctrl->subsys->lock);
1098 nvme_mpath_start_freeze(ctrl->subsys);
1099 nvme_mpath_wait_freeze(ctrl->subsys);
1100 nvme_start_freeze(ctrl);
1101 nvme_wait_freeze(ctrl);
1107 void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
1111 nvme_unfreeze(ctrl);
1112 nvme_mpath_unfreeze(ctrl->subsys);
1113 mutex_unlock(&ctrl->subsys->lock);
1114 mutex_unlock(&ctrl->scan_lock);
1118 &ctrl->flags)) {
1119 dev_info(ctrl->device,
1124 nvme_queue_scan(ctrl);
1125 flush_work(&ctrl->scan_work);
1140 nvme_update_keep_alive(ctrl, cmd);
1158 static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
1160 unsigned long delay = ctrl->kato * HZ / 2;
1168 if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
1173 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
1175 queue_delayed_work(nvme_wq, &ctrl->ka_work,
1176 nvme_keep_alive_work_period(ctrl));
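
The keep-alive work period is half the keep-alive timeout (kato is in seconds, line 1160); when the controller advertises Traffic Based Keep Alive (the CTRATT check at 1168), the branch body is not shown above, but as far as I recall the kernel halves the delay again so an observed completion cannot postpone the keep-alive past the timeout. A worked computation under that assumption, with an illustrative HZ:

	#include <stdio.h>
	#include <stdbool.h>

	#define HZ 1000			/* illustrative */

	static unsigned long ka_work_period(unsigned int kato, bool tbkas)
	{
		unsigned long delay = kato * HZ / 2;	/* line 1160 */

		if (tbkas)		/* TBKAS branch at line 1168 */
			delay /= 2;	/* assumption: run at twice the frequency */
		return delay;
	}

	int main(void)
	{
		printf("%lu\n", ka_work_period(5, false));	/* 2500 jiffies */
		printf("%lu\n", ka_work_period(5, true));	/* 1250 jiffies */
		return 0;
	}
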
1182 struct nvme_ctrl *ctrl = rq->end_io_data;
1184 unsigned long delay = nvme_keep_alive_work_period(ctrl);
1185 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
1194 dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
1202 dev_err(ctrl->device,
1208 ctrl->ka_last_check_time = jiffies;
1209 ctrl->comp_seen = false;
1211 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
1217 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
1219 bool comp_seen = ctrl->comp_seen;
1222 ctrl->ka_last_check_time = jiffies;
1224 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
1225 dev_dbg(ctrl->device,
1227 ctrl->comp_seen = false;
1228 nvme_queue_keep_alive_work(ctrl);
1232 rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
1236 dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
1237 nvme_reset_ctrl(ctrl);
1240 nvme_init_request(rq, &ctrl->ka_cmd);
1242 rq->timeout = ctrl->kato * HZ;
1244 rq->end_io_data = ctrl;
1248 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
1250 if (unlikely(ctrl->kato == 0))
1253 nvme_queue_keep_alive_work(ctrl);
1256 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
1258 if (unlikely(ctrl->kato == 0))
1261 cancel_delayed_work_sync(&ctrl->ka_work);
1265 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
1271 dev_info(ctrl->device,
1273 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
1275 nvme_stop_keep_alive(ctrl);
1276 ctrl->kato = new_kato;
1277 nvme_start_keep_alive(ctrl);
1286 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
1288 if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
1289 return ctrl->vs < NVME_VS(1, 2, 0);
1290 return ctrl->vs < NVME_VS(1, 1, 0);
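
nvme_ctrl_limited_cns() compares the controller's Version register against packed constants; NVME_VS() packs major/minor/tertiary into one integer so versions order correctly under plain comparison. A self-contained sketch of that encoding (shift positions as I recall them from include/linux/nvme.h):

	#include <stdio.h>

	#define NVME_VS(major, minor, tertiary) \
		(((major) << 16) | ((minor) << 8) | (tertiary))

	int main(void)
	{
		printf("1.2.0 packs to %#x\n", (unsigned)NVME_VS(1, 2, 0));
		printf("1.1.0 < 1.2.0: %d\n", NVME_VS(1, 1, 0) < NVME_VS(1, 2, 0));
		return 0;
	}
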
1315 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1318 const char *warn_str = "ctrl returned bogus length:";
1324 dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
1328 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1334 dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
1338 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1344 dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
1348 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1354 dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
1367 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
1375 if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
1377 if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
1388 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1391 dev_warn(ctrl->device,
1403 len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
1410 if (nvme_multi_css(ctrl) && !csi_seen) {
1411 dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
1421 static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
1436 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1438 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1445 static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
1452 ret = nvme_identify_ns(ctrl, info->nsid, &id);
1467 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
1468 dev_info(ctrl->device,
1471 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1474 if (ctrl->vs >= NVME_VS(1, 2, 0) &&
1484 static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
1499 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
1546 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1552 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
1569 dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1584 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1586 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1592 status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1595 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1598 queue_work(nvme_wq, &ctrl->async_event_work);
1609 if (!try_module_get(ns->ctrl->ops->module))
1623 module_put(ns->ctrl->ops->module);
1706 struct nvme_ctrl *ctrl = ns->ctrl;
1710 if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
1711 ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
1713 if (ctrl->max_discard_sectors == 0) {
1727 blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
1728 blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
1730 if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1746 struct nvme_ctrl *ctrl = ns->ctrl;
1754 if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
1769 ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
1804 struct nvme_ctrl *ctrl = ns->ctrl;
1812 if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1815 if (ctrl->ops->flags & NVME_F_FABRICS) {
1835 if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
1852 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1855 bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
1857 if (ctrl->max_hw_sectors) {
1859 (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
1861 max_segments = min_not_zero(max_segments, ctrl->max_segments);
1862 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1899 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
1929 ns->ctrl->max_integrity_segments);
1938 ns->ctrl->max_zeroes_sectors);
1954 struct nvme_ctrl *ctrl = ns->ctrl;
1957 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1958 is_power_of_2(ctrl->max_hw_sectors))
1959 iob = ctrl->max_hw_sectors;
1987 nvme_set_queue_limits(ns->ctrl, ns->queue);
2015 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
2029 nvme_set_queue_limits(ns->ctrl, ns->queue);
2098 dev_info(ns->ctrl->device,
2107 dev_info(ns->ctrl->device,
2118 struct nvme_ctrl *ctrl = data;
2129 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
2133 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
2135 if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) {
2136 if (!ctrl->opal_dev)
2137 ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit);
2139 opal_unlock_from_suspend(ctrl->opal_dev);
2141 free_opal_dev(ctrl->opal_dev);
2142 ctrl->opal_dev = NULL;
2146 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
2173 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val,
2180 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2190 dev_err(ctrl->device,
2200 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2204 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2206 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2208 ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2210 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2215 return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
2217 ctrl->shutdown_timeout, "shutdown");
2219 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2221 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0,
2222 (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset");
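
The reset wait at line 2222 converts CAP.TO into whole seconds: TO counts 500 ms units, so (TO + 1) / 2 rounds the wait up to the next second. A quick check of that arithmetic (CAP.TO sits in bits 31:24 of CAP per the NVMe base spec):

	#include <stdio.h>

	/* CAP.TO: bits 31:24 of the 64-bit CAP register, in 500 ms units. */
	#define CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)

	int main(void)
	{
		unsigned long long cap = 30ULL << 24;	/* TO = 30 -> 15 s worst case */
		unsigned int to = (unsigned int)CAP_TIMEOUT(cap);

		printf("TO=%u -> wait (%u + 1) / 2 = %u s\n", to, to, (to + 1) / 2);
		return 0;
	}
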
2226 int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2232 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2234 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2237 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2240 dev_err(ctrl->device,
2246 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
2247 ctrl->ctrl_config = NVME_CC_CSS_CSI;
2249 ctrl->ctrl_config = NVME_CC_CSS_NVM;
2257 ctrl->ctrl_config &= ~NVME_CC_CRIME;
2259 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2260 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2261 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2262 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2267 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
2272 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2276 timeout = NVME_CAP_TIMEOUT(ctrl->cap);
2277 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
2280 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
2282 dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
2295 dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
2296 crto, ctrl->cap);
2301 ctrl->ctrl_config |= NVME_CC_ENABLE;
2302 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2305 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY,
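
nvme_enable_ctrl() assembles CC field by field before setting ENABLE: command-set selection, memory page size (MPS encodes pages of 2^(12+MPS) bytes, hence NVME_CTRL_PAGE_SHIFT - 12 at line 2259), arbitration, shutdown notification, and the fixed 64-byte SQ / 16-byte CQ entry sizes. A standalone sketch assembling those bits; treat the exact field offsets as my reading of the NVMe base spec's CC layout, not values verified against the kernel header:

	#include <stdio.h>
	#include <stdint.h>

	#define CC_EN			(1u << 0)
	#define CC_MPS_SHIFT		7
	#define CC_IOSQES_SHIFT		16
	#define CC_IOCQES_SHIFT		20

	int main(void)
	{
		unsigned int page_shift = 12;	/* 4 KiB controller pages */
		uint32_t cc = 0;

		cc |= (page_shift - 12) << CC_MPS_SHIFT;	/* MPS: 2^(12+MPS) bytes */
		cc |= 6u << CC_IOSQES_SHIFT;	/* SQ entry size 2^6 = 64 bytes */
		cc |= 4u << CC_IOCQES_SHIFT;	/* CQ entry size 2^4 = 16 bytes */
		cc |= CC_EN;			/* set ENABLE last, as the driver does */
		printf("CC = %#x\n", (unsigned)cc);
		return 0;
	}
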
2310 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2315 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2319 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2322 dev_warn_once(ctrl->device,
2327 static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
2334 if (ctrl->crdt[0])
2336 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
2348 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2408 static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2423 if (!ctrl->apsta)
2426 if (ctrl->npss > 31) {
2427 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2435 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2437 dev_dbg(ctrl->device, "APST disabled\n");
2447 for (state = (int)ctrl->npss; state >= 0; state--) {
2457 if (state == ctrl->npss &&
2458 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2465 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
2468 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2469 if (exit_latency_us > ctrl->ps_max_latency_us)
2473 le32_to_cpu(ctrl->psd[state].entry_lat);
2498 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2500 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2505 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2508 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
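
The APST loop at line 2447 walks power states from the deepest (index npss) toward the shallowest, skipping operational states (the flag check at 2465) and any state whose exit latency exceeds ps_max_latency_us (2468-2469). A reduced standalone model of just that eligibility filter, with made-up latencies; the real code goes on to encode idle transition times into the 64-entry APST table:

	#include <stdio.h>
	#include <stdbool.h>

	/* Latencies in microseconds; values invented for the example. */
	struct psd { unsigned int entry_lat, exit_lat; bool non_operational; };

	static int deepest_allowed(const struct psd *psd, int npss,
				   unsigned long long max_lat_us)
	{
		for (int state = npss; state >= 0; state--) {
			if (!psd[state].non_operational)
				continue;	/* flag check, as at 2465 */
			if (psd[state].exit_lat > max_lat_us)
				continue;	/* latency budget, as at 2468-2469 */
			return state;
		}
		return -1;			/* no usable non-operational state */
	}

	int main(void)
	{
		const struct psd psd[] = {
			{ 0,   0,    false },	/* PS0: operational */
			{ 100, 500,  true  },	/* PS1 */
			{ 700, 5000, true  },	/* PS2 */
		};

		printf("%d\n", deepest_allowed(psd, 2, 1000));	/* 1: PS2 too slow */
		return 0;
	}
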
2515 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2528 if (ctrl->ps_max_latency_us != latency) {
2529 ctrl->ps_max_latency_us = latency;
2530 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
2531 nvme_configure_apst(ctrl);
2626 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2632 if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2639 if (ctrl->vs >= NVME_VS(1, 2, 1))
2640 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2715 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
2717 return ctrl->opts && ctrl->opts->discovery_nqn;
2721 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2731 if (tmp->cntlid == ctrl->cntlid) {
2732 dev_err(ctrl->device,
2734 ctrl->cntlid, dev_name(tmp->device),
2740 nvme_discovery_ctrl(ctrl))
2743 dev_err(ctrl->device,
2751 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2765 nvme_init_subnqn(subsys, ctrl, id);
2778 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
2779 dev_err(ctrl->device,
2791 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2800 if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2807 dev_err(ctrl->device,
2816 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2817 dev_name(ctrl->device));
2819 dev_err(ctrl->device,
2825 subsys->instance = ctrl->instance;
2826 ctrl->subsys = subsys;
2827 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2838 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
2854 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2857 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
2860 struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi);
2870 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
2877 old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
2887 static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
2889 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
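
With page_shift = MPSMIN + 12 (line 2889), a count given in controller memory-page units converts to 512-byte sectors as units << (page_shift - 9), since each page is 2^page_shift bytes. A worked example assuming MPSMIN = 0, i.e. 4 KiB pages (id->wzsl is the write-zeroes size passed in at line 2943):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned int mpsmin = 0;		/* from NVME_CAP_MPSMIN(cap) */
		unsigned int page_shift = mpsmin + 12;	/* 4 KiB pages */
		uint32_t units = 16;			/* e.g. id->wzsl */

		/* one page = 2^page_shift bytes = 2^(page_shift - 9) sectors */
		printf("%u units -> %u sectors\n", (unsigned)units,
		       (unsigned)(units << (page_shift - 9)));	/* 16 * 8 = 128 */
		return 0;
	}
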
2896 static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
2902 if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
2903 ctrl->max_discard_sectors = UINT_MAX;
2904 ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
2906 ctrl->max_discard_sectors = 0;
2907 ctrl->max_discard_segments = 0;
2916 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
2917 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
2918 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
2920 ctrl->max_zeroes_sectors = 0;
2922 if (ctrl->subsys->subtype != NVME_NQN_NVME ||
2923 nvme_ctrl_limited_cns(ctrl) ||
2924 test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
2935 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
2940 ctrl->max_discard_segments = id->dmrl;
2941 ctrl->dmrsl = le32_to_cpu(id->dmrsl);
2943 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
2947 set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
2952 static int nvme_init_effects_log(struct nvme_ctrl *ctrl,
2961 old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
2971 static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
2973 struct nvme_effects_log *log = ctrl->effects;
3003 static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
3007 if (ctrl->effects)
3011 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
3016 if (!ctrl->effects) {
3017 ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
3022 nvme_init_known_nvm_effects(ctrl);
3026 static int nvme_init_identify(struct nvme_ctrl *ctrl)
3033 ret = nvme_identify_ctrl(ctrl, &id);
3035 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
3039 if (!(ctrl->ops->flags & NVME_F_FABRICS))
3040 ctrl->cntlid = le16_to_cpu(id->cntlid);
3042 if (!ctrl->identified) {
3055 ctrl->quirks |= core_quirks[i].quirks;
3058 ret = nvme_init_subsystem(ctrl, id);
3062 ret = nvme_init_effects(ctrl, id);
3066 memcpy(ctrl->subsys->firmware_rev, id->fr,
3067 sizeof(ctrl->subsys->firmware_rev));
3069 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
3070 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
3071 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
3074 ctrl->crdt[0] = le16_to_cpu(id->crdt1);
3075 ctrl->crdt[1] = le16_to_cpu(id->crdt2);
3076 ctrl->crdt[2] = le16_to_cpu(id->crdt3);
3078 ctrl->oacs = le16_to_cpu(id->oacs);
3079 ctrl->oncs = le16_to_cpu(id->oncs);
3080 ctrl->mtfa = le16_to_cpu(id->mtfa);
3081 ctrl->oaes = le32_to_cpu(id->oaes);
3082 ctrl->wctemp = le16_to_cpu(id->wctemp);
3083 ctrl->cctemp = le16_to_cpu(id->cctemp);
3085 atomic_set(&ctrl->abort_limit, id->acl + 1);
3086 ctrl->vwc = id->vwc;
3088 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
3091 ctrl->max_hw_sectors =
3092 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
3094 nvme_set_queue_limits(ctrl, ctrl->admin_q);
3095 ctrl->sgls = le32_to_cpu(id->sgls);
3096 ctrl->kas = le16_to_cpu(id->kas);
3097 ctrl->max_namespaces = le32_to_cpu(id->mnan);
3098 ctrl->ctratt = le32_to_cpu(id->ctratt);
3100 ctrl->cntrltype = id->cntrltype;
3101 ctrl->dctype = id->dctype;
3107 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
3110 if (ctrl->shutdown_timeout != shutdown_timeout)
3111 dev_info(ctrl->device,
3113 ctrl->shutdown_timeout);
3115 ctrl->shutdown_timeout = shutdown_timeout;
3117 ctrl->npss = id->npss;
3118 ctrl->apsta = id->apsta;
3119 prev_apst_enabled = ctrl->apst_enabled;
3120 if (ctrl->quirks & NVME_QUIRK_NO_APST) {
3122 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3123 ctrl->apst_enabled = true;
3125 ctrl->apst_enabled = false;
3128 ctrl->apst_enabled = id->apsta;
3130 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
3132 if (ctrl->ops->flags & NVME_F_FABRICS) {
3133 ctrl->icdoff = le16_to_cpu(id->icdoff);
3134 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
3135 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
3136 ctrl->maxcmd = le16_to_cpu(id->maxcmd);
3142 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
3143 dev_err(ctrl->device,
3146 ctrl->cntlid, le16_to_cpu(id->cntlid));
3151 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
3152 dev_err(ctrl->device,
3158 ctrl->hmpre = le32_to_cpu(id->hmpre);
3159 ctrl->hmmin = le32_to_cpu(id->hmmin);
3160 ctrl->hmminds = le32_to_cpu(id->hmminds);
3161 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3164 ret = nvme_mpath_init_identify(ctrl, id);
3168 if (ctrl->apst_enabled && !prev_apst_enabled)
3169 dev_pm_qos_expose_latency_tolerance(ctrl->device);
3170 else if (!ctrl->apst_enabled && prev_apst_enabled)
3171 dev_pm_qos_hide_latency_tolerance(ctrl->device);
3183 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
3187 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
3189 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
3193 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
3195 if (ctrl->vs >= NVME_VS(1, 1, 0))
3196 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
3198 ret = nvme_init_identify(ctrl);
3202 ret = nvme_configure_apst(ctrl);
3206 ret = nvme_configure_timestamp(ctrl);
3210 ret = nvme_configure_host_options(ctrl);
3214 nvme_configure_opal(ctrl, was_suspended);
3216 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
3221 ret = nvme_hwmon_init(ctrl);
3226 clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
3227 ctrl->identified = true;
3235 struct nvme_ctrl *ctrl =
3238 switch (nvme_ctrl_state(ctrl)) {
3245 nvme_get_ctrl(ctrl);
3246 if (!try_module_get(ctrl->ops->module)) {
3247 nvme_put_ctrl(ctrl);
3251 file->private_data = ctrl;
3257 struct nvme_ctrl *ctrl =
3260 module_put(ctrl->ops->module);
3261 nvme_put_ctrl(ctrl);
3274 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
3279 lockdep_assert_held(&ctrl->subsys->lock);
3281 list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
3287 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
3377 ns->cdev_device.parent = ns->ctrl->device;
3379 ns->ctrl->instance, ns->head->instance);
3384 ns->ctrl->ops->module);
3387 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3401 ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
3409 head->subsys = ctrl->subsys;
3416 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
3420 head->effects = ctrl->effects;
3422 ret = nvme_mpath_alloc_disk(ctrl, head);
3426 list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3428 kref_get(&ctrl->subsys->ref);
3434 ida_free(&ctrl->subsys->ns_ida, head->instance);
3471 struct nvme_ctrl *ctrl = ns->ctrl;
3475 ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
3493 nvme_print_device_info(ctrl);
3494 if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
3495 ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
3497 dev_err(ctrl->device,
3503 dev_err(ctrl->device,
3505 dev_err(ctrl->device,
3510 ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
3513 mutex_lock(&ctrl->subsys->lock);
3514 head = nvme_find_ns_head(ctrl, info->nsid);
3516 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
3518 dev_err(ctrl->device,
3523 head = nvme_alloc_ns_head(ctrl, info);
3531 dev_err(ctrl->device,
3537 dev_err(ctrl->device,
3544 dev_warn(ctrl->device,
3547 dev_warn_once(ctrl->device,
3554 mutex_unlock(&ctrl->subsys->lock);
3560 mutex_unlock(&ctrl->subsys->lock);
3564 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3569 srcu_idx = srcu_read_lock(&ctrl->srcu);
3570 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
3571 srcu_read_lock_held(&ctrl->srcu)) {
3581 srcu_read_unlock(&ctrl->srcu, srcu_idx);
3593 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
3599 list_add(&ns->list, &ns->ctrl->namespaces);
3602 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
3606 int node = ctrl->numa_node;
3612 disk = blk_mq_alloc_disk(ctrl->tagset, ns);
3621 if (ctrl->opts && ctrl->opts->data_digest)
3625 if (ctrl->ops->supports_pci_p2pdma &&
3626 ctrl->ops->supports_pci_p2pdma(ctrl))
3629 ns->ctrl = ctrl;
3647 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
3648 ctrl->instance, ns->head->instance);
3651 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
3654 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
3661 mutex_lock(&ctrl->namespaces_lock);
3663 * Ensure that no namespaces are added to the ctrl list after the queues
3666 if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
3667 mutex_unlock(&ctrl->namespaces_lock);
3671 mutex_unlock(&ctrl->namespaces_lock);
3672 synchronize_srcu(&ctrl->srcu);
3673 nvme_get_ctrl(ctrl);
3675 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
3687 nvme_put_ctrl(ctrl);
3688 mutex_lock(&ctrl->namespaces_lock);
3690 mutex_unlock(&ctrl->namespaces_lock);
3691 synchronize_srcu(&ctrl->srcu);
3693 mutex_lock(&ctrl->subsys->lock);
3697 mutex_unlock(&ctrl->subsys->lock);
3726 mutex_lock(&ns->ctrl->subsys->lock);
3732 mutex_unlock(&ns->ctrl->subsys->lock);
3741 mutex_lock(&ns->ctrl->namespaces_lock);
3743 mutex_unlock(&ns->ctrl->namespaces_lock);
3744 synchronize_srcu(&ns->ctrl->srcu);
3751 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
3753 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
3766 dev_err(ns->ctrl->device,
3783 static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3789 if (nvme_identify_ns_descs(ctrl, &info))
3792 if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
3793 dev_warn(ctrl->device,
3803 if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
3805 ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
3807 ret = nvme_ns_info_from_identify(ctrl, &info);
3810 nvme_ns_remove_by_nsid(ctrl, nsid);
3819 ns = nvme_find_get_ns(ctrl, nsid);
3824 nvme_alloc_ns(ctrl, &info);
3828 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
3834 mutex_lock(&ctrl->namespaces_lock);
3835 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3838 synchronize_srcu(&ctrl->srcu);
3842 mutex_unlock(&ctrl->namespaces_lock);
3848 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
3866 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
3869 dev_warn(ctrl->device,
3879 nvme_scan_ns(ctrl, nsid);
3881 nvme_ns_remove_by_nsid(ctrl, prev);
3885 nvme_remove_invalid_namespaces(ctrl, prev);
3891 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
3896 if (nvme_identify_ctrl(ctrl, &id))
3902 nvme_scan_ns(ctrl, i);
3904 nvme_remove_invalid_namespaces(ctrl, nn);
3907 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
3923 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
3926 dev_warn(ctrl->device,
3934 struct nvme_ctrl *ctrl =
3938 /* No tagset on a live ctrl means IO queues could not be created */
3939 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
3949 ret = nvme_init_non_mdts_limits(ctrl);
3951 dev_warn(ctrl->device,
3956 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
3957 dev_info(ctrl->device, "rescanning namespaces.\n");
3958 nvme_clear_changed_ns_log(ctrl);
3961 mutex_lock(&ctrl->scan_lock);
3962 if (nvme_ctrl_limited_cns(ctrl)) {
3963 nvme_scan_ns_sequential(ctrl);
3970 ret = nvme_scan_ns_list(ctrl);
3972 nvme_scan_ns_sequential(ctrl);
3974 mutex_unlock(&ctrl->scan_lock);
3982 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3992 nvme_mpath_clear_ctrl_paths(ctrl);
3998 nvme_unquiesce_io_queues(ctrl);
4001 flush_work(&ctrl->scan_work);
4009 if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
4010 nvme_mark_namespaces_dead(ctrl);
4013 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
4015 mutex_lock(&ctrl->namespaces_lock);
4016 list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu);
4017 mutex_unlock(&ctrl->namespaces_lock);
4018 synchronize_srcu(&ctrl->srcu);
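
The splice at line 4016 is the writer side of the SRCU scheme used throughout this file: list updates happen under namespaces_lock, and synchronize_srcu() then waits out every in-flight reader before the removed namespaces are torn down. A kernel-style sketch of the reader/writer pairing (act() is a placeholder; this is an idiom fragment for kernel context, not a standalone program):

	/* reader: lockless traversal under the SRCU read lock */
	idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
				 srcu_read_lock_held(&ctrl->srcu))
		act(ns);
	srcu_read_unlock(&ctrl->srcu, idx);

	/* writer: mutate under the mutex, then wait out readers */
	mutex_lock(&ctrl->namespaces_lock);
	list_del_rcu(&ns->list);
	mutex_unlock(&ctrl->namespaces_lock);
	synchronize_srcu(&ctrl->srcu);	/* no reader still sees ns */
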
4027 const struct nvme_ctrl *ctrl =
4029 struct nvmf_ctrl_options *opts = ctrl->opts;
4032 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
4057 static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
4061 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4064 static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
4067 u32 aen_result = ctrl->aen_result;
4069 ctrl->aen_result = 0;
4076 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4082 struct nvme_ctrl *ctrl =
4085 nvme_aen_uevent(ctrl);
4089 * flushing ctrl async_event_work after changing the controller state
4092 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
4093 ctrl->ops->submit_async_event(ctrl);
4096 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
4101 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
4107 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
4110 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
4118 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
4120 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4126 struct nvme_ctrl *ctrl = container_of(work,
4130 nvme_auth_stop(ctrl);
4132 if (ctrl->mtfa)
4134 msecs_to_jiffies(ctrl->mtfa * 100);
4139 nvme_quiesce_io_queues(ctrl);
4140 while (nvme_ctrl_pp_status(ctrl)) {
4142 dev_warn(ctrl->device,
4144 nvme_try_sched_reset(ctrl);
4150 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4153 nvme_unquiesce_io_queues(ctrl);
4155 nvme_get_fw_slot_info(ctrl);
4157 queue_work(nvme_wq, &ctrl->async_event_work);
4170 static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
4177 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4178 nvme_queue_scan(ctrl);
4186 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
4188 queue_work(nvme_wq, &ctrl->fw_act_work);
4193 if (!ctrl->ana_log_buf)
4195 queue_work(nvme_wq, &ctrl->ana_work);
4199 ctrl->aen_result = result;
4202 dev_warn(ctrl->device, "async event result %08x\n", result);
4207 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
4209 dev_warn(ctrl->device, "resetting controller due to AER\n");
4210 nvme_reset_ctrl(ctrl);
4213 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4224 trace_nvme_async_event(ctrl, result);
4227 requeue = nvme_handle_aen_notice(ctrl, result);
4235 nvme_handle_aer_persistent_error(ctrl);
4242 ctrl->aen_result = result;
4249 queue_work(nvme_wq, &ctrl->async_event_work);
4253 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
4261 if (ctrl->ops->flags & NVME_F_FABRICS)
4264 set->numa_node = ctrl->numa_node;
4266 if (ctrl->ops->flags & NVME_F_BLOCKING)
4269 set->driver_data = ctrl;
4276 ctrl->admin_q = blk_mq_init_queue(set);
4277 if (IS_ERR(ctrl->admin_q)) {
4278 ret = PTR_ERR(ctrl->admin_q);
4282 if (ctrl->ops->flags & NVME_F_FABRICS) {
4283 ctrl->fabrics_q = blk_mq_init_queue(set);
4284 if (IS_ERR(ctrl->fabrics_q)) {
4285 ret = PTR_ERR(ctrl->fabrics_q);
4290 ctrl->admin_tagset = set;
4294 blk_mq_destroy_queue(ctrl->admin_q);
4295 blk_put_queue(ctrl->admin_q);
4298 ctrl->admin_q = NULL;
4299 ctrl->fabrics_q = NULL;
4304 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
4306 blk_mq_destroy_queue(ctrl->admin_q);
4307 blk_put_queue(ctrl->admin_q);
4308 if (ctrl->ops->flags & NVME_F_FABRICS) {
4309 blk_mq_destroy_queue(ctrl->fabrics_q);
4310 blk_put_queue(ctrl->fabrics_q);
4312 blk_mq_free_tag_set(ctrl->admin_tagset);
4316 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
4324 set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
4329 if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
4331 else if (ctrl->ops->flags & NVME_F_FABRICS)
4334 set->numa_node = ctrl->numa_node;
4336 if (ctrl->ops->flags & NVME_F_BLOCKING)
4339 set->driver_data = ctrl;
4340 set->nr_hw_queues = ctrl->queue_count - 1;
4347 if (ctrl->ops->flags & NVME_F_FABRICS) {
4348 ctrl->connect_q = blk_mq_init_queue(set);
4349 if (IS_ERR(ctrl->connect_q)) {
4350 ret = PTR_ERR(ctrl->connect_q);
4354 ctrl->connect_q);
4357 ctrl->tagset = set;
4362 ctrl->connect_q = NULL;
4367 void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
4369 if (ctrl->ops->flags & NVME_F_FABRICS) {
4370 blk_mq_destroy_queue(ctrl->connect_q);
4371 blk_put_queue(ctrl->connect_q);
4373 blk_mq_free_tag_set(ctrl->tagset);
4377 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
4379 nvme_mpath_stop(ctrl);
4380 nvme_auth_stop(ctrl);
4381 nvme_stop_keep_alive(ctrl);
4382 nvme_stop_failfast_work(ctrl);
4383 flush_work(&ctrl->async_event_work);
4384 cancel_work_sync(&ctrl->fw_act_work);
4385 if (ctrl->ops->stop_ctrl)
4386 ctrl->ops->stop_ctrl(ctrl);
4390 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
4392 nvme_start_keep_alive(ctrl);
4394 nvme_enable_aen(ctrl);
4402 if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
4403 nvme_discovery_ctrl(ctrl))
4404 nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
4406 if (ctrl->queue_count > 1) {
4407 nvme_queue_scan(ctrl);
4408 nvme_unquiesce_io_queues(ctrl);
4409 nvme_mpath_update(ctrl);
4412 nvme_change_uevent(ctrl, "NVME_EVENT=connected");
4413 set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
4417 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
4419 nvme_hwmon_exit(ctrl);
4420 nvme_fault_inject_fini(&ctrl->fault_inject);
4421 dev_pm_qos_hide_latency_tolerance(ctrl->device);
4422 cdev_device_del(&ctrl->cdev, ctrl->device);
4423 nvme_put_ctrl(ctrl);
4427 static void nvme_free_cels(struct nvme_ctrl *ctrl)
4432 xa_for_each(&ctrl->cels, i, cel) {
4433 xa_erase(&ctrl->cels, i);
4437 xa_destroy(&ctrl->cels);
4442 struct nvme_ctrl *ctrl =
4444 struct nvme_subsystem *subsys = ctrl->subsys;
4446 if (!subsys || ctrl->instance != subsys->instance)
4447 ida_free(&nvme_instance_ida, ctrl->instance);
4449 nvme_free_cels(ctrl);
4450 nvme_mpath_uninit(ctrl);
4451 cleanup_srcu_struct(&ctrl->srcu);
4452 nvme_auth_stop(ctrl);
4453 nvme_auth_free(ctrl);
4454 __free_page(ctrl->discard_page);
4455 free_opal_dev(ctrl->opal_dev);
4459 list_del(&ctrl->subsys_entry);
4460 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4464 ctrl->ops->free_ctrl(ctrl);
4475 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
4480 WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
4481 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
4482 spin_lock_init(&ctrl->lock);
4483 mutex_init(&ctrl->namespaces_lock);
4485 ret = init_srcu_struct(&ctrl->srcu);
4489 mutex_init(&ctrl->scan_lock);
4490 INIT_LIST_HEAD(&ctrl->namespaces);
4491 xa_init(&ctrl->cels);
4492 ctrl->dev = dev;
4493 ctrl->ops = ops;
4494 ctrl->quirks = quirks;
4495 ctrl->numa_node = NUMA_NO_NODE;
4496 INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4497 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4498 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4499 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4500 init_waitqueue_head(&ctrl->state_wq);
4502 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
4503 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
4504 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
4505 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
4509 ctrl->discard_page = alloc_page(GFP_KERNEL);
4510 if (!ctrl->discard_page) {
4518 ctrl->instance = ret;
4520 device_initialize(&ctrl->ctrl_device);
4521 ctrl->device = &ctrl->ctrl_device;
4522 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
4523 ctrl->instance);
4524 ctrl->device->class = nvme_class;
4525 ctrl->device->parent = ctrl->dev;
4527 ctrl->device->groups = ops->dev_attr_groups;
4529 ctrl->device->groups = nvme_dev_attr_groups;
4530 ctrl->device->release = nvme_free_ctrl;
4531 dev_set_drvdata(ctrl->device, ctrl);
4532 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
4536 nvme_get_ctrl(ctrl);
4537 cdev_init(&ctrl->cdev, &nvme_dev_fops);
4538 ctrl->cdev.owner = ops->module;
4539 ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4547 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
4548 dev_pm_qos_update_user_latency_tolerance(ctrl->device,
4551 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
4552 nvme_mpath_init_ctrl(ctrl);
4553 ret = nvme_auth_init_ctrl(ctrl);
4559 nvme_fault_inject_fini(&ctrl->fault_inject);
4560 dev_pm_qos_hide_latency_tolerance(ctrl->device);
4561 cdev_device_del(&ctrl->cdev, ctrl->device);
4563 nvme_put_ctrl(ctrl);
4564 kfree_const(ctrl->device->kobj.name);
4566 ida_free(&nvme_instance_ida, ctrl->instance);
4568 if (ctrl->discard_page)
4569 __free_page(ctrl->discard_page);
4570 cleanup_srcu_struct(&ctrl->srcu);
4576 void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
4581 srcu_idx = srcu_read_lock(&ctrl->srcu);
4582 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4583 srcu_read_lock_held(&ctrl->srcu))
4585 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4589 void nvme_unfreeze(struct nvme_ctrl *ctrl)
4594 srcu_idx = srcu_read_lock(&ctrl->srcu);
4595 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4596 srcu_read_lock_held(&ctrl->srcu))
4598 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4599 clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
4603 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
4608 srcu_idx = srcu_read_lock(&ctrl->srcu);
4609 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4610 srcu_read_lock_held(&ctrl->srcu)) {
4615 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4620 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
4625 srcu_idx = srcu_read_lock(&ctrl->srcu);
4626 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4627 srcu_read_lock_held(&ctrl->srcu))
4629 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4633 void nvme_start_freeze(struct nvme_ctrl *ctrl)
4638 set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
4639 srcu_idx = srcu_read_lock(&ctrl->srcu);
4640 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4641 srcu_read_lock_held(&ctrl->srcu))
4643 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4647 void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl)
4649 if (!ctrl->tagset)
4651 if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags))
4652 blk_mq_quiesce_tagset(ctrl->tagset);
4654 blk_mq_wait_quiesce_done(ctrl->tagset);
4658 void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl)
4660 if (!ctrl->tagset)
4662 if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
4663 blk_mq_unquiesce_tagset(ctrl->tagset);
4667 void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl)
4669 if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
4670 blk_mq_quiesce_queue(ctrl->admin_q);
4672 blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
4676 void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl)
4678 if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
4679 blk_mq_unquiesce_queue(ctrl->admin_q);
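
The STOPPED and ADMIN_Q_STOPPED bits make the quiesce helpers idempotent: test_and_set_bit() ensures only the first quiesce call reaches the block layer, and test_and_clear_bit() ensures exactly one matching unquiesce, so the underlying quiesce depth never goes unbalanced. A userspace model of that flag discipline:

	#include <stdio.h>
	#include <stdbool.h>

	struct q { bool stopped; int quiesce_depth; };

	static void quiesce(struct q *q)
	{
		if (!q->stopped) {		/* test_and_set_bit() stand-in */
			q->stopped = true;
			q->quiesce_depth++;	/* blk_mq_quiesce_*() stand-in */
		}
	}

	static void unquiesce(struct q *q)
	{
		if (q->stopped) {		/* test_and_clear_bit() stand-in */
			q->stopped = false;
			q->quiesce_depth--;
		}
	}

	int main(void)
	{
		struct q q = { false, 0 };

		quiesce(&q);
		quiesce(&q);			/* second call is a no-op */
		unquiesce(&q);
		printf("depth=%d\n", q.quiesce_depth);	/* 0: balanced */
		return 0;
	}
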
4683 void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
4688 srcu_idx = srcu_read_lock(&ctrl->srcu);
4689 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4690 srcu_read_lock_held(&ctrl->srcu))
4692 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4696 void nvme_sync_queues(struct nvme_ctrl *ctrl)
4698 nvme_sync_io_queues(ctrl);
4699 if (ctrl->admin_q)
4700 blk_sync_queue(ctrl->admin_q);