1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2011-2014, Intel Corporation.
8 #include <linux/blk-mq.h>
9 #include <linux/blk-integrity.h>
16 #include <linux/backing-dev.h>
27 #include <linux/nvme-auth.h>
92 * nvme_wq - hosts nvme related works that are not reset or delete
93 * nvme_reset_wq - hosts nvme reset works
94 * nvme_delete_wq - hosts nvme delete works
97 * keep-alive, periodic reconnects etc. nvme_reset_wq
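/*
 * Editor's note (sketch, not sampled from the file): reset and delete
 * work gets dedicated workqueues so it can make forward progress even
 * while nvme_wq is busy with keep-alive or reconnect work; the
 * deadlock-avoidance rationale is an assumption here, based on the
 * separate alloc_workqueue() calls visible near the end of this file.
 */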
134 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
135 queue_work(nvme_wq, &ctrl->scan_work);
147 return -EBUSY;
148 if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
149 return -EBUSY;
162 set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
163 dev_info(ctrl->device, "failfast expired\n");
169 if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
172 schedule_delayed_work(&ctrl->failfast_work,
173 ctrl->opts->fast_io_fail_tmo * HZ);
178 if (!ctrl->opts)
181 cancel_delayed_work_sync(&ctrl->failfast_work);
182 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
189 return -EBUSY;
190 if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
191 return -EBUSY;
202 flush_work(&ctrl->reset_work);
204 ret = -ENETRESET;
212 dev_info(ctrl->device,
215 flush_work(&ctrl->reset_work);
218 ctrl->ops->delete_ctrl(ctrl);
233 return -EBUSY;
234 if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
235 return -EBUSY;
244 * since ->delete_ctrl can free the controller.
300 crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
302 delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
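/*
 * Editor's note: CRD is a two-bit status field (bits 12:11, hence the
 * >> 11 above); a non-zero value selects one of the three Command Retry
 * Delay Times from Identify Controller. crdt[] is in 100 ms units, so
 * multiplying by 100 yields the requeue delay in milliseconds.
 */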
304 nvme_req(req)->retries++;
306 blk_mq_delay_kick_requeue_list(req->q, delay);
311 struct nvme_ns *ns = req->q->queuedata;
316 ns->disk ? ns->disk->disk_name : "?",
317 nvme_get_opcode_str(nr->cmd->common.opcode),
318 nr->cmd->common.opcode,
320 (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
321 nvme_get_error_status_str(nr->status),
322 nr->status >> 8 & 7, /* Status Code Type */
323 nr->status & 0xff, /* Status Code */
324 nr->status & NVME_SC_MORE ? "MORE " : "",
325 nr->status & NVME_SC_DNR ? "DNR " : "");
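/*
 * Editor's worked example (status value hypothetical): nr->status ==
 * 0x4002 decodes as SCT 0x0 (generic), SC 0x02, DNR set, MORE clear,
 * so the logged suffix would read "(sct 0x0 / sc 0x2) DNR ".
 */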
330 dev_name(nr->ctrl->device),
331 nvme_get_admin_opcode_str(nr->cmd->common.opcode),
332 nr->cmd->common.opcode,
333 nvme_get_error_status_str(nr->status),
334 nr->status >> 8 & 7, /* Status Code Type */
335 nr->status & 0xff, /* Status Code */
336 nr->status & NVME_SC_MORE ? "MORE " : "",
337 nr->status & NVME_SC_DNR ? "DNR " : "");
349 if (likely(nvme_req(req)->status == 0))
352 if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
356 (nvme_req(req)->status & NVME_SC_DNR) ||
357 nvme_req(req)->retries >= nvme_max_retries)
360 if (req->cmd_flags & REQ_NVME_MPATH) {
361 if (nvme_is_path_error(nvme_req(req)->status) ||
362 blk_queue_dying(req->q))
365 if (blk_queue_dying(req->q))
376 req->__sector = nvme_lba_to_sect(req->q->queuedata,
377 le64_to_cpu(nvme_req(req)->result.u64));
382 blk_status_t status = nvme_error_status(nvme_req(req)->status);
384 if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
388 if (req->cmd_flags & REQ_NVME_MPATH)
395 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
401 * Completions of long-running commands should not be able to
405 * req->deadline - req->timeout is the command submission time
408 if (ctrl->kas &&
409 req->deadline - req->timeout >= ctrl->ka_last_check_time)
410 ctrl->comp_seen = true;
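/*
 * Editor's note: rq->deadline - rq->timeout reconstructs the command's
 * submission time, so comp_seen is only set for commands submitted
 * after the last keep-alive check; a long-running command already in
 * flight across several keep-alive windows cannot count as fresh
 * traffic.
 */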
424 queue_work(nvme_wq, &ctrl->dhchap_auth_work);
443 * Called to unwind from ->queue_rq on a failed command submission so that the
450 nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
459 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
460 "Cancelling I/O %d", req->tag);
466 nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
467 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
475 if (ctrl->tagset) {
476 blk_mq_tagset_busy_iter(ctrl->tagset,
478 blk_mq_tagset_wait_completed_request(ctrl->tagset);
485 if (ctrl->admin_tagset) {
486 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
488 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
500 spin_lock_irqsave(&ctrl->lock, flags);
568 WRITE_ONCE(ctrl->state, new_state);
569 wake_up_all(&ctrl->state_wq);
572 spin_unlock_irqrestore(&ctrl->lock, flags);
594 wait_event(ctrl->state_wq,
607 ida_free(&head->subsys->ns_ida, head->instance);
608 cleanup_srcu_struct(&head->srcu);
609 nvme_put_subsystem(head->subsys);
615 return kref_get_unless_zero(&head->ref);
620 kref_put(&head->ref, nvme_free_ns_head);
627 put_disk(ns->disk);
628 nvme_put_ns_head(ns->head);
629 nvme_put_ctrl(ns->ctrl);
635 return kref_get_unless_zero(&ns->kref);
640 kref_put(&ns->kref, nvme_free_ns);
646 nvme_req(req)->status = 0;
647 nvme_req(req)->retries = 0;
648 nvme_req(req)->flags = 0;
649 req->rq_flags |= RQF_DONTPREP;
655 if (req->q->queuedata)
656 req->timeout = NVME_IO_TIMEOUT;
658 req->timeout = NVME_ADMIN_TIMEOUT;
661 cmd->common.flags &= ~NVME_CMD_SGL_ALL;
663 req->cmd_flags |= REQ_FAILFAST_DRIVER;
664 if (req->mq_hctx->type == HCTX_TYPE_POLL)
665 req->cmd_flags |= REQ_POLLED;
667 req->rq_flags |= RQF_QUIET;
668 memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
689 !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
690 !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
709 if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
712 if (ctrl->ops->flags & NVME_F_FABRICS) {
720 if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
721 (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
722 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
723 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
741 cmnd->common.opcode = nvme_cmd_flush;
742 cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
766 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
769 range = page_address(ns->ctrl->discard_page);
772 if (queue_max_discard_segments(req->q) == 1) {
774 u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
782 u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
783 u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
795 if (virt_to_page(range) == ns->ctrl->discard_page)
796 clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
803 cmnd->dsm.opcode = nvme_cmd_dsm;
804 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
805 cmnd->dsm.nr = cpu_to_le32(segments - 1);
806 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
808 bvec_set_virt(&req->special_vec, range, alloc_size);
809 req->rq_flags |= RQF_SPECIAL_PAYLOAD;
821 switch (ns->guard_type) {
823 cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
830 cmnd->rw.reftag = cpu_to_le32(lower);
831 cmnd->rw.cdw3 = cpu_to_le32(upper);
843 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
846 cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
847 cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
848 cmnd->write_zeroes.slba =
850 cmnd->write_zeroes.length =
851 cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
853 if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC))
854 cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
857 cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
859 switch (ns->pi_type) {
877 if (req->cmd_flags & REQ_FUA)
879 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
882 if (req->cmd_flags & REQ_RAHEAD)
885 cmnd->rw.opcode = op;
886 cmnd->rw.flags = 0;
887 cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
888 cmnd->rw.cdw2 = 0;
889 cmnd->rw.cdw3 = 0;
890 cmnd->rw.metadata = 0;
891 cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
892 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
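/*
 * Editor's note: the write-zeroes length above and rw.length here are
 * 0's based NLB fields per the NVMe spec, hence the trailing "- 1"
 * when converting from a byte count.
 */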
893 cmnd->rw.reftag = 0;
894 cmnd->rw.apptag = 0;
895 cmnd->rw.appmask = 0;
897 if (ns->ms) {
910 switch (ns->pi_type) {
925 cmnd->rw.control = cpu_to_le16(control);
926 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
932 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
933 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
935 if (req->special_vec.bv_page == ctrl->discard_page)
936 clear_bit_unlock(0, &ctrl->discard_page_busy);
938 kfree(bvec_virt(&req->special_vec));
939 req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
946 struct nvme_command *cmd = nvme_req(req)->cmd;
949 if (!(req->rq_flags & RQF_DONTPREP))
993 cmd->common.command_id = nvme_cid(req);
1010 if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
1011 return -EINTR;
1012 if (nvme_req(rq)->status)
1013 return nvme_req(rq)->status;
1033 qid - 1);
1047 *result = nvme_req(req)->result;
1067 effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
1069 dev_warn_once(ctrl->device,
1080 effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1096 mutex_lock(&ctrl->scan_lock);
1097 mutex_lock(&ctrl->subsys->lock);
1098 nvme_mpath_start_freeze(ctrl->subsys);
1099 nvme_mpath_wait_freeze(ctrl->subsys);
1112 nvme_mpath_unfreeze(ctrl->subsys);
1113 mutex_unlock(&ctrl->subsys->lock);
1114 mutex_unlock(&ctrl->scan_lock);
1118 &ctrl->flags)) {
1119 dev_info(ctrl->device,
1125 flush_work(&ctrl->scan_work);
1130 switch (cmd->common.opcode) {
1132 switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
1160 unsigned long delay = ctrl->kato * HZ / 2;
1168 if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
1175 queue_delayed_work(nvme_wq, &ctrl->ka_work,
1182 struct nvme_ctrl *ctrl = rq->end_io_data;
1183 unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
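/*
 * Editor's note: as in the traffic-based keep-alive check earlier,
 * rq->deadline - rq->timeout recovers the submission time, making rtt
 * the keep-alive round trip in jiffies; it is subtracted from the next
 * delay below so a slow keep-alive does not stretch the effective
 * interval.
 */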
1192 delay -= rtt;
1194 dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
1202 dev_err(ctrl->device,
1208 ctrl->ka_last_check_time = jiffies;
1209 ctrl->comp_seen = false;
1211 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
1219 bool comp_seen = ctrl->comp_seen;
1222 ctrl->ka_last_check_time = jiffies;
1224 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
1225 dev_dbg(ctrl->device,
1226 "reschedule traffic based keep-alive timer\n");
1227 ctrl->comp_seen = false;
1232 rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
1236 dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
1240 nvme_init_request(rq, &ctrl->ka_cmd);
1242 rq->timeout = ctrl->kato * HZ;
1243 rq->end_io = nvme_keep_alive_end_io;
1244 rq->end_io_data = ctrl;
1250 if (unlikely(ctrl->kato == 0))
1258 if (unlikely(ctrl->kato == 0))
1261 cancel_delayed_work_sync(&ctrl->ka_work);
1269 DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);
1271 dev_info(ctrl->device,
1273 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
1276 ctrl->kato = new_kato;
1288 if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
1289 return ctrl->vs < NVME_VS(1, 2, 0);
1290 return ctrl->vs < NVME_VS(1, 1, 0);
1298 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1304 return -ENOMEM;
1306 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
1321 switch (cur->nidt) {
1323 if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1324 dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
1325 warn_str, cur->nidl);
1326 return -1;
1328 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1330 memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1333 if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1334 dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
1335 warn_str, cur->nidl);
1336 return -1;
1338 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1340 memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1343 if (cur->nidl != NVME_NIDT_UUID_LEN) {
1344 dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
1345 warn_str, cur->nidl);
1346 return -1;
1348 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1350 uuid_copy(&ids->uuid, data + sizeof(*cur));
1353 if (cur->nidl != NVME_NIDT_CSI_LEN) {
1354 dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
1355 warn_str, cur->nidl);
1356 return -1;
1358 memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
1363 return cur->nidl;
1375 if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
1377 if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
1381 c.identify.nsid = cpu_to_le32(info->nsid);
1386 return -ENOMEM;
1388 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1391 dev_warn(ctrl->device,
1393 info->nsid, status);
1400 if (cur->nidl == 0)
1403 len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
1411 dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
1412 info->nsid);
1413 status = -EINVAL;
1427 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1434 return -ENOMEM;
1436 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1438 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1448 struct nvme_ns_ids *ids = &info->ids;
1452 ret = nvme_identify_ns(ctrl, info->nsid, &id);
1456 if (id->ncap == 0) {
1458 info->is_removed = true;
1459 ret = -ENODEV;
1463 info->anagrpid = id->anagrpid;
1464 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
1465 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
1466 info->is_ready = true;
1467 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
1468 dev_info(ctrl->device,
1471 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1472 !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
1473 memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
1474 if (ctrl->vs >= NVME_VS(1, 2, 0) &&
1475 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
1476 memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
1490 .identify.nsid = cpu_to_le32(info->nsid),
1497 return -ENOMEM;
1499 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
1501 info->anagrpid = id->anagrpid;
1502 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
1503 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
1504 info->is_ready = id->nstat & NVME_NSTAT_NRDY;
1521 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1548 u32 q_count = (*count - 1) | ((*count - 1) << 16);
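/*
 * Editor's note: the Number of Queues feature takes 0's based counts,
 * with NSQR in cdw11 bits 15:0 and NCQR in bits 31:16, which is why
 * the same (*count - 1) value is packed into both halves here.
 */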
1569 dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1586 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1595 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1598 queue_work(nvme_wq, &ctrl->async_event_work);
1605 if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
1609 if (!try_module_get(ns->ctrl->ops->module))
1617 return -ENXIO;
1623 module_put(ns->ctrl->ops->module);
1629 return nvme_ns_open(disk->private_data);
1634 nvme_ns_release(disk->private_data);
1640 geo->heads = 1 << 6;
1641 geo->sectors = 1 << 5;
1642 geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
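/*
 * Editor's note: 64 heads x 32 sectors makes a cylinder 2048 (1 << 11)
 * 512-byte sectors, so capacity >> 11 yields the cylinder count for
 * this legacy fake geometry.
 */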
1652 switch (ns->pi_type) {
1654 switch (ns->guard_type) {
1656 integrity.profile = &t10_pi_type3_crc;
1661 integrity.profile = &ext_pi_type3_crc64;
1666 integrity.profile = NULL;
1672 switch (ns->guard_type) {
1674 integrity.profile = &t10_pi_type1_crc;
1679 integrity.profile = &ext_pi_type1_crc64;
1684 integrity.profile = NULL;
1689 integrity.profile = NULL;
1693 integrity.tuple_size = ns->ms;
1695 blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
1706 struct nvme_ctrl *ctrl = ns->ctrl;
1707 struct request_queue *queue = disk->queue;
1710 if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
1711 ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
1713 if (ctrl->max_discard_sectors == 0) {
1721 queue->limits.discard_granularity = size;
1724 if (queue->limits.max_discard_sectors)
1727 blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
1728 blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
1730 if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1736 return uuid_equal(&a->uuid, &b->uuid) &&
1737 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1738 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
1739 a->csi == b->csi;
1744 bool first = id->dps & NVME_NS_DPS_PI_FIRST;
1745 unsigned lbaf = nvme_lbaf_index(id->flbas);
1746 struct nvme_ctrl *ctrl = ns->ctrl;
1752 ns->pi_size = 0;
1753 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
1754 if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
1755 ns->pi_size = sizeof(struct t10_pi_tuple);
1756 ns->guard_type = NVME_NVM_NS_16B_GUARD;
1762 return -ENOMEM;
1765 c.identify.nsid = cpu_to_le32(ns->head->ns_id);
1769 ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
1773 elbaf = le32_to_cpu(nvm->elbaf[lbaf]);
1779 ns->guard_type = nvme_elbaf_guard_type(elbaf);
1780 switch (ns->guard_type) {
1782 ns->pi_size = sizeof(struct crc64_pi_tuple);
1785 ns->pi_size = sizeof(struct t10_pi_tuple);
1794 if (ns->pi_size && (first || ns->ms == ns->pi_size))
1795 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1797 ns->pi_type = 0;
1804 struct nvme_ctrl *ctrl = ns->ctrl;
1811 ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
1812 if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1815 if (ctrl->ops->flags & NVME_F_FABRICS) {
1821 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
1824 ns->features |= NVME_NS_EXT_LBAS;
1835 if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
1836 ns->features |= NVME_NS_METADATA_SUPPORTED;
1844 if (id->flbas & NVME_NS_FLBAS_META_EXT)
1845 ns->features |= NVME_NS_EXT_LBAS;
1847 ns->features |= NVME_NS_METADATA_SUPPORTED;
1855 bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
1857 if (ctrl->max_hw_sectors) {
1859 (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
1861 max_segments = min_not_zero(max_segments, ctrl->max_segments);
1862 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1865 blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
1873 sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
1874 u32 bs = 1U << ns->lba_shift;
1882 if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) {
1890 if (id->nabo == 0) {
1896 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
1897 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
1899 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
1902 if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
1904 phys_bs = bs * (1 + le16_to_cpu(id->npwg));
1906 io_opt = bs * (1 + le16_to_cpu(id->nows));
1909 blk_queue_logical_block_size(disk->queue, bs);
1915 blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
1916 blk_queue_io_min(disk->queue, phys_bs);
1917 blk_queue_io_opt(disk->queue, io_opt);
1920 * Register a metadata profile for PI, or the plain non-integrity NVMe
1925 if (ns->ms) {
1927 (ns->features & NVME_NS_METADATA_SUPPORTED))
1929 ns->ctrl->max_integrity_segments);
1937 blk_queue_max_write_zeroes_sectors(disk->queue,
1938 ns->ctrl->max_zeroes_sectors);
1943 return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
1954 struct nvme_ctrl *ctrl = ns->ctrl;
1957 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1958 is_power_of_2(ctrl->max_hw_sectors))
1959 iob = ctrl->max_hw_sectors;
1961 iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
1967 if (nvme_first_scan(ns->disk))
1969 ns->disk->disk_name, iob);
1973 if (blk_queue_is_zoned(ns->disk->queue)) {
1974 if (nvme_first_scan(ns->disk))
1976 ns->disk->disk_name);
1980 blk_queue_chunk_sectors(ns->queue, iob);
1986 blk_mq_freeze_queue(ns->disk->queue);
1987 nvme_set_queue_limits(ns->ctrl, ns->queue);
1988 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
1989 blk_mq_unfreeze_queue(ns->disk->queue);
1991 if (nvme_ns_head_multipath(ns->head)) {
1992 blk_mq_freeze_queue(ns->head->disk->queue);
1993 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
1995 blk_stack_limits(&ns->head->disk->queue->limits,
1996 &ns->queue->limits, 0);
1997 ns->head->disk->flags |= GENHD_FL_HIDDEN;
1998 blk_mq_unfreeze_queue(ns->head->disk->queue);
2001 /* Hide the block-interface for these devices */
2002 ns->disk->flags |= GENHD_FL_HIDDEN;
2003 set_bit(NVME_NS_READY, &ns->flags);
2015 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
2019 if (id->ncap == 0) {
2021 info->is_removed = true;
2022 ret = -ENODEV;
2026 blk_mq_freeze_queue(ns->disk->queue);
2027 lbaf = nvme_lbaf_index(id->flbas);
2028 ns->lba_shift = id->lbaf[lbaf].ds;
2029 nvme_set_queue_limits(ns->ctrl, ns->queue);
2033 blk_mq_unfreeze_queue(ns->disk->queue);
2037 nvme_update_disk_info(ns->disk, ns, id);
2039 if (ns->head->ids.csi == NVME_CSI_ZNS) {
2042 blk_mq_unfreeze_queue(ns->disk->queue);
2050 * require that, it must be a no-op if reads from deallocated data
2053 if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
2054 ns->features |= NVME_NS_DEAC;
2055 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
2056 set_bit(NVME_NS_READY, &ns->flags);
2057 blk_mq_unfreeze_queue(ns->disk->queue);
2059 if (blk_queue_is_zoned(ns->queue)) {
2061 if (ret && !nvme_first_scan(ns->disk))
2065 if (nvme_ns_head_multipath(ns->head)) {
2066 blk_mq_freeze_queue(ns->head->disk->queue);
2067 nvme_update_disk_info(ns->head->disk, ns, id);
2068 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
2070 blk_stack_limits(&ns->head->disk->queue->limits,
2071 &ns->queue->limits, 0);
2072 disk_update_readahead(ns->head->disk);
2073 blk_mq_unfreeze_queue(ns->head->disk->queue);
2082 if (ret == -ENODEV) {
2083 ns->disk->flags |= GENHD_FL_HIDDEN;
2084 set_bit(NVME_NS_READY, &ns->flags);
2095 switch (info->ids.csi) {
2098 dev_info(ns->ctrl->device,
2100 info->nsid);
2107 dev_info(ns->ctrl->device,
2109 info->nsid, info->ids.csi);
2129 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
2135 if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) {
2136 if (!ctrl->opal_dev)
2137 ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit);
2139 opal_unlock_from_suspend(ctrl->opal_dev);
2141 free_opal_dev(ctrl->opal_dev);
2142 ctrl->opal_dev = NULL;
2155 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
2180 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2182 return -ENODEV;
2188 return -EINTR;
2190 dev_err(ctrl->device,
2193 return -ENODEV;
2204 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2206 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2208 ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2210 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2217 ctrl->shutdown_timeout, "shutdown");
2219 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2222 (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset");
2232 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2234 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2237 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2240 dev_err(ctrl->device,
2243 return -ENODEV;
2246 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
2247 ctrl->ctrl_config = NVME_CC_CSS_CSI;
2249 ctrl->ctrl_config = NVME_CC_CSS_NVM;
2257 ctrl->ctrl_config &= ~NVME_CC_CRIME;
2259 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2260 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2261 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2262 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2267 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
2272 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2276 timeout = NVME_CAP_TIMEOUT(ctrl->cap);
2277 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
2280 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
2282 dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
2295 dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
2296 crto, ctrl->cap);
2301 ctrl->ctrl_config |= NVME_CC_ENABLE;
2302 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2315 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2322 dev_warn_once(ctrl->device,
2334 if (ctrl->crdt[0])
2336 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
2346 host->acre = acre;
2347 host->lbafee = lbafee;
2389 * - If the parameters provide explicit timeouts and tolerances, they will be
2390 * used to build a table with up to 2 non-operational states to transition to.
2394 * and battery power, the timeouts and tolerances reflect a compromise
2395 * between values used by Microsoft for AC and battery scenarios.
2396 * - If not, we'll configure the table with a simple heuristic: we are willing
2399 * lower-power non-operational state after waiting 50 * (enlat + exlat)
2403 * We will not autonomously enter any non-operational state for which the total
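/*
 * Editor's worked example (latencies hypothetical): a non-operational
 * state with enlat = 1000 us and exlat = 5000 us gets an idle timeout
 * of 50 * (1000 + 5000) us = 300 ms, and is only used at all if its
 * latency fits within ps_max_latency_us.
 */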
2414 int max_ps = -1;
2423 if (!ctrl->apsta)
2426 if (ctrl->npss > 31) {
2427 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2435 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2437 dev_dbg(ctrl->device, "APST disabled\n");
2442 * Walk through all states from lowest- to highest-power.
2443 * According to the spec, lower-numbered states use more power. NPSS,
2444 * despite the name, is the index of the lowest-power state, not the
2447 for (state = (int)ctrl->npss; state >= 0; state--) {
2451 table->entries[state] = target;
2457 if (state == ctrl->npss &&
2458 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2462 * Is this state a useful non-operational state for higher-power
2465 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
2468 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2469 if (exit_latency_us > ctrl->ps_max_latency_us)
2473 le32_to_cpu(ctrl->psd[state].entry_lat);
2486 if (transition_ms > (1 << 24) - 1)
2487 transition_ms = (1 << 24) - 1;
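/*
 * Editor's sketch (APST entry layout per the NVMe spec): each 64-bit
 * table entry packs the target state in bits 07:03 (ITPS) and the idle
 * time in milliseconds in bits 31:08 (ITPT); ITPT is 24 bits wide,
 * hence the clamp above. Roughly:
 *	target = cpu_to_le64(state << 3 | (u64)transition_ms << 8);
 */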
2491 if (max_ps == -1)
2497 if (max_ps == -1)
2498 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2500 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2508 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2528 if (ctrl->ps_max_latency_us != latency) {
2529 ctrl->ps_max_latency_us = latency;
2559 * This LiteON CL1-3D*-Q11 firmware version has a race
2569 * This Kioxia CD6-V Series / HPE PE8030 device times out and
2574 * to use "nvme set-feature" to disable APST, but booting with
2597 /* match is null-terminated but idstr is space-padded. */
2621 return q->vid == le16_to_cpu(id->vid) &&
2622 string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2623 string_matches(id->fr, q->fr, sizeof(id->fr));
2632 if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2633 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2635 strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2639 if (ctrl->vs >= NVME_VS(1, 2, 1))
2640 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2648 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2650 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2651 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2652 off += sizeof(id->sn);
2653 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2654 off += sizeof(id->mn);
2655 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2663 if (subsys->instance >= 0)
2664 ida_free(&nvme_instance_ida, subsys->instance);
2674 list_del(&subsys->entry);
2677 ida_destroy(&subsys->ns_ida);
2678 device_del(&subsys->dev);
2679 put_device(&subsys->dev);
2684 kref_put(&subsys->ref, nvme_destroy_subsystem);
2705 if (strcmp(subsys->subnqn, subsysnqn))
2707 if (!kref_get_unless_zero(&subsys->ref))
2717 return ctrl->opts && ctrl->opts->discovery_nqn;
2727 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2731 if (tmp->cntlid == ctrl->cntlid) {
2732 dev_err(ctrl->device,
2734 ctrl->cntlid, dev_name(tmp->device),
2735 subsys->subnqn);
2739 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2743 dev_err(ctrl->device,
2758 return -ENOMEM;
2760 subsys->instance = -1;
2761 mutex_init(&subsys->lock);
2762 kref_init(&subsys->ref);
2763 INIT_LIST_HEAD(&subsys->ctrls);
2764 INIT_LIST_HEAD(&subsys->nsheads);
2766 memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2767 memcpy(subsys->model, id->mn, sizeof(subsys->model));
2768 subsys->vendor_id = le16_to_cpu(id->vid);
2769 subsys->cmic = id->cmic;
2772 if (id->cntrltype == NVME_CTRL_DISC ||
2773 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
2774 subsys->subtype = NVME_NQN_DISC;
2776 subsys->subtype = NVME_NQN_NVME;
2778 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
2779 dev_err(ctrl->device,
2781 subsys->subnqn);
2783 return -EINVAL;
2785 subsys->awupf = le16_to_cpu(id->awupf);
2788 subsys->dev.class = nvme_subsys_class;
2789 subsys->dev.release = nvme_release_subsystem;
2790 subsys->dev.groups = nvme_subsys_attrs_groups;
2791 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2792 device_initialize(&subsys->dev);
2795 found = __nvme_find_get_subsystem(subsys->subnqn);
2797 put_device(&subsys->dev);
2801 ret = -EINVAL;
2805 ret = device_add(&subsys->dev);
2807 dev_err(ctrl->device,
2809 put_device(&subsys->dev);
2812 ida_init(&subsys->ns_ida);
2813 list_add_tail(&subsys->entry, &nvme_subsystems);
2816 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2817 dev_name(ctrl->device));
2819 dev_err(ctrl->device,
2825 subsys->instance = ctrl->instance;
2826 ctrl->subsys = subsys;
2827 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2848 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2854 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2860 struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi);
2868 return -ENOMEM;
2877 old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
2889 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
2891 if (check_shl_overflow(1U, units + page_shift - 9, &val))
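/*
 * Editor's worked example (values hypothetical): with MPSMIN = 0 the
 * controller page size is 4 KiB (page_shift = 12), so units = 5 gives
 * 1 << (5 + 12 - 9) = 256 sectors, i.e. a 128 KiB maximum transfer;
 * check_shl_overflow() catches absurdly large unit values.
 */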
2902 if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
2903 ctrl->max_discard_sectors = UINT_MAX;
2904 ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
2906 ctrl->max_discard_sectors = 0;
2907 ctrl->max_discard_segments = 0;
2912 * to the write-zeroes, we are cautious and limit the size to the
2916 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
2917 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
2918 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
2920 ctrl->max_zeroes_sectors = 0;
2922 if (ctrl->subsys->subtype != NVME_NQN_NVME ||
2924 test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
2929 return -ENOMEM;
2935 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
2939 if (id->dmrl)
2940 ctrl->max_discard_segments = id->dmrl;
2941 ctrl->dmrsl = le32_to_cpu(id->dmrsl);
2942 if (id->wzsl)
2943 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
2947 set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
2959 return -ENOMEM;
2961 old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
2973 struct nvme_effects_log *log = ctrl->effects;
2975 log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
2978 log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
2996 log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK);
2998 log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
2999 log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
3000 log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
3007 if (ctrl->effects)
3010 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
3011 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
3016 if (!ctrl->effects) {
3017 ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
3035 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
3036 return -EIO;
3039 if (!(ctrl->ops->flags & NVME_F_FABRICS))
3040 ctrl->cntlid = le16_to_cpu(id->cntlid);
3042 if (!ctrl->identified) {
3049 * could re-scan for quirks every time we reinitialize
3055 ctrl->quirks |= core_quirks[i].quirks;
3066 memcpy(ctrl->subsys->firmware_rev, id->fr,
3067 sizeof(ctrl->subsys->firmware_rev));
3069 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
3070 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
3071 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
3074 ctrl->crdt[0] = le16_to_cpu(id->crdt1);
3075 ctrl->crdt[1] = le16_to_cpu(id->crdt2);
3076 ctrl->crdt[2] = le16_to_cpu(id->crdt3);
3078 ctrl->oacs = le16_to_cpu(id->oacs);
3079 ctrl->oncs = le16_to_cpu(id->oncs);
3080 ctrl->mtfa = le16_to_cpu(id->mtfa);
3081 ctrl->oaes = le32_to_cpu(id->oaes);
3082 ctrl->wctemp = le16_to_cpu(id->wctemp);
3083 ctrl->cctemp = le16_to_cpu(id->cctemp);
3085 atomic_set(&ctrl->abort_limit, id->acl + 1);
3086 ctrl->vwc = id->vwc;
3087 if (id->mdts)
3088 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
3091 ctrl->max_hw_sectors =
3092 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
3094 nvme_set_queue_limits(ctrl, ctrl->admin_q);
3095 ctrl->sgls = le32_to_cpu(id->sgls);
3096 ctrl->kas = le16_to_cpu(id->kas);
3097 ctrl->max_namespaces = le32_to_cpu(id->mnan);
3098 ctrl->ctratt = le32_to_cpu(id->ctratt);
3100 ctrl->cntrltype = id->cntrltype;
3101 ctrl->dctype = id->dctype;
3103 if (id->rtd3e) {
3104 /* us -> s */
3105 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
3107 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
3110 if (ctrl->shutdown_timeout != shutdown_timeout)
3111 dev_info(ctrl->device,
3113 ctrl->shutdown_timeout);
3115 ctrl->shutdown_timeout = shutdown_timeout;
3117 ctrl->npss = id->npss;
3118 ctrl->apsta = id->apsta;
3119 prev_apst_enabled = ctrl->apst_enabled;
3120 if (ctrl->quirks & NVME_QUIRK_NO_APST) {
3121 if (force_apst && id->apsta) {
3122 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3123 ctrl->apst_enabled = true;
3125 ctrl->apst_enabled = false;
3128 ctrl->apst_enabled = id->apsta;
3130 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
3132 if (ctrl->ops->flags & NVME_F_FABRICS) {
3133 ctrl->icdoff = le16_to_cpu(id->icdoff);
3134 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
3135 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
3136 ctrl->maxcmd = le16_to_cpu(id->maxcmd);
3142 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
3143 dev_err(ctrl->device,
3146 ctrl->cntlid, le16_to_cpu(id->cntlid));
3147 ret = -EINVAL;
3151 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
3152 dev_err(ctrl->device,
3153 "keep-alive support is mandatory for fabrics\n");
3154 ret = -EINVAL;
3158 ctrl->hmpre = le32_to_cpu(id->hmpre);
3159 ctrl->hmmin = le32_to_cpu(id->hmmin);
3160 ctrl->hmminds = le32_to_cpu(id->hmminds);
3161 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3168 if (ctrl->apst_enabled && !prev_apst_enabled)
3169 dev_pm_qos_expose_latency_tolerance(ctrl->device);
3170 else if (!ctrl->apst_enabled && prev_apst_enabled)
3171 dev_pm_qos_hide_latency_tolerance(ctrl->device);
3187 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
3189 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
3193 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
3195 if (ctrl->vs >= NVME_VS(1, 1, 0))
3196 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
3216 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
3222 if (ret == -EINTR)
3226 clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
3227 ctrl->identified = true;
3236 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3242 return -EWOULDBLOCK;
3246 if (!try_module_get(ctrl->ops->module)) {
3248 return -EINVAL;
3251 file->private_data = ctrl;
3258 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3260 module_put(ctrl->ops->module);
3279 lockdep_assert_held(&ctrl->subsys->lock);
3281 list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
3287 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
3289 if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
3299 bool has_uuid = !uuid_is_null(&ids->uuid);
3300 bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
3301 bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
3304 lockdep_assert_held(&subsys->lock);
3306 list_for_each_entry(h, &subsys->nsheads, entry) {
3307 if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
3308 return -EINVAL;
3310 memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
3311 return -EINVAL;
3313 memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
3314 return -EINVAL;
3322 ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
3339 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
3340 cdev_device->class = nvme_ns_chr_class;
3341 cdev_device->release = nvme_cdev_rel;
3344 cdev->owner = owner;
3354 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
3359 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
3377 ns->cdev_device.parent = ns->ctrl->device;
3378 ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
3379 ns->ctrl->instance, ns->head->instance);
3383 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
3384 ns->ctrl->ops->module);
3392 int ret = -ENOMEM;
3401 ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
3404 head->instance = ret;
3405 INIT_LIST_HEAD(&head->list);
3406 ret = init_srcu_struct(&head->srcu);
3409 head->subsys = ctrl->subsys;
3410 head->ns_id = info->nsid;
3411 head->ids = info->ids;
3412 head->shared = info->is_shared;
3413 kref_init(&head->ref);
3415 if (head->ids.csi) {
3416 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
3420 head->effects = ctrl->effects;
3426 list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3428 kref_get(&ctrl->subsys->ref);
3432 cleanup_srcu_struct(&head->srcu);
3434 ida_free(&ctrl->subsys->ns_ida, head->instance);
3458 mutex_lock(&s->lock);
3460 mutex_unlock(&s->lock);
3471 struct nvme_ctrl *ctrl = ns->ctrl;
3475 ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
3482 * and in user space the /dev/disk/by-id/ links rely on it.
3484 * If the device also claims to be multi-path capable, back off
3494 if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
3495 ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
3496 info->is_shared)) {
3497 dev_err(ctrl->device,
3499 info->nsid);
3503 dev_err(ctrl->device,
3504 "clearing duplicate IDs for nsid %d\n", info->nsid);
3505 dev_err(ctrl->device,
3506 "use of /dev/disk/by-id/ may cause data corruption\n");
3507 memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
3508 memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
3509 memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
3510 ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
3513 mutex_lock(&ctrl->subsys->lock);
3514 head = nvme_find_ns_head(ctrl, info->nsid);
3516 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
3518 dev_err(ctrl->device,
3520 info->nsid);
3529 ret = -EINVAL;
3530 if (!info->is_shared || !head->shared) {
3531 dev_err(ctrl->device,
3533 info->nsid);
3536 if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
3537 dev_err(ctrl->device,
3539 info->nsid);
3544 dev_warn(ctrl->device,
3546 info->nsid);
3547 dev_warn_once(ctrl->device,
3552 list_add_tail_rcu(&ns->siblings, &head->list);
3553 ns->head = head;
3554 mutex_unlock(&ctrl->subsys->lock);
3560 mutex_unlock(&ctrl->subsys->lock);
3569 srcu_idx = srcu_read_lock(&ctrl->srcu);
3570 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
3571 srcu_read_lock_held(&ctrl->srcu)) {
3572 if (ns->head->ns_id == nsid) {
3578 if (ns->head->ns_id > nsid)
3581 srcu_read_unlock(&ctrl->srcu, srcu_idx);
3593 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
3594 if (tmp->head->ns_id < ns->head->ns_id) {
3595 list_add_rcu(&ns->list, &tmp->list);
3599 list_add(&ns->list, &ns->ctrl->namespaces);
3606 int node = ctrl->numa_node;
3612 disk = blk_mq_alloc_disk(ctrl->tagset, ns);
3615 disk->fops = &nvme_bdev_ops;
3616 disk->private_data = ns;
3618 ns->disk = disk;
3619 ns->queue = disk->queue;
3621 if (ctrl->opts && ctrl->opts->data_digest)
3622 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
3624 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3625 if (ctrl->ops->supports_pci_p2pdma &&
3626 ctrl->ops->supports_pci_p2pdma(ctrl))
3627 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3629 ns->ctrl = ctrl;
3630 kref_init(&ns->kref);
3646 if (nvme_ns_head_multipath(ns->head)) {
3647 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
3648 ctrl->instance, ns->head->instance);
3649 disk->flags |= GENHD_FL_HIDDEN;
3651 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
3652 ns->head->instance);
3654 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
3655 ns->head->instance);
3661 mutex_lock(&ctrl->namespaces_lock);
3666 if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
3667 mutex_unlock(&ctrl->namespaces_lock);
3671 mutex_unlock(&ctrl->namespaces_lock);
3672 synchronize_srcu(&ctrl->srcu);
3675 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
3678 if (!nvme_ns_head_multipath(ns->head))
3681 nvme_mpath_add_disk(ns, info->anagrpid);
3682 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3688 mutex_lock(&ctrl->namespaces_lock);
3689 list_del_rcu(&ns->list);
3690 mutex_unlock(&ctrl->namespaces_lock);
3691 synchronize_srcu(&ctrl->srcu);
3693 mutex_lock(&ctrl->subsys->lock);
3694 list_del_rcu(&ns->siblings);
3695 if (list_empty(&ns->head->list))
3696 list_del_init(&ns->head->entry);
3697 mutex_unlock(&ctrl->subsys->lock);
3698 nvme_put_ns_head(ns->head);
3709 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3712 clear_bit(NVME_NS_READY, &ns->flags);
3713 set_capacity(ns->disk, 0);
3714 nvme_fault_inject_fini(&ns->fault_inject);
3720 synchronize_srcu(&ns->head->srcu);
3724 synchronize_srcu(&ns->head->srcu);
3726 mutex_lock(&ns->ctrl->subsys->lock);
3727 list_del_rcu(&ns->siblings);
3728 if (list_empty(&ns->head->list)) {
3729 list_del_init(&ns->head->entry);
3732 mutex_unlock(&ns->ctrl->subsys->lock);
3734 /* guarantee not available in head->list */
3735 synchronize_srcu(&ns->head->srcu);
3737 if (!nvme_ns_head_multipath(ns->head))
3738 nvme_cdev_del(&ns->cdev, &ns->cdev_device);
3739 del_gendisk(ns->disk);
3741 mutex_lock(&ns->ctrl->namespaces_lock);
3742 list_del_rcu(&ns->list);
3743 mutex_unlock(&ns->ctrl->namespaces_lock);
3744 synchronize_srcu(&ns->ctrl->srcu);
3747 nvme_mpath_shutdown_disk(ns->head);
3765 if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
3766 dev_err(ns->ctrl->device,
3767 "identifiers changed for nsid %d\n", ns->head->ns_id);
3793 dev_warn(ctrl->device,
3803 if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
3834 mutex_lock(&ctrl->namespaces_lock);
3835 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3836 if (ns->head->ns_id > nsid) {
3837 list_del_rcu(&ns->list);
3838 synchronize_srcu(&ctrl->srcu);
3839 list_add_tail_rcu(&ns->list, &rm_list);
3842 mutex_unlock(&ctrl->namespaces_lock);
3857 return -ENOMEM;
3866 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
3869 dev_warn(ctrl->device,
3898 nn = le32_to_cpu(id->nn);
3926 dev_warn(ctrl->device,
3939 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
3945 * such a scenario. The controller's non-mdts limits are reported in the unit
3947 * namespace. Hence re-read the limits at the time of ns allocation.
3951 dev_warn(ctrl->device,
3952 "reading non-mdts-limits failed: %d\n", ret);
3956 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
3957 dev_info(ctrl->device, "rescanning namespaces.\n");
3961 mutex_lock(&ctrl->scan_lock);
3974 mutex_unlock(&ctrl->scan_lock);
4001 flush_work(&ctrl->scan_work);
4012 /* this is a no-op when called from the controller reset handler */
4015 mutex_lock(&ctrl->namespaces_lock);
4016 list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu);
4017 mutex_unlock(&ctrl->namespaces_lock);
4018 synchronize_srcu(&ctrl->srcu);
4029 struct nvmf_ctrl_options *opts = ctrl->opts;
4032 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
4037 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
4042 opts->trsvcid ?: "none");
4047 opts->host_traddr ?: "none");
4052 opts->host_iface ?: "none");
4061 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4067 u32 aen_result = ctrl->aen_result;
4069 ctrl->aen_result = 0;
4076 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4093 ctrl->ops->submit_async_event(ctrl);
4101 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
4107 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
4120 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4132 if (ctrl->mtfa)
4134 msecs_to_jiffies(ctrl->mtfa * 100);
4142 dev_warn(ctrl->device,
4157 queue_work(nvme_wq, &ctrl->async_event_work);
4177 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4188 queue_work(nvme_wq, &ctrl->fw_act_work);
4193 if (!ctrl->ana_log_buf)
4195 queue_work(nvme_wq, &ctrl->ana_work);
4199 ctrl->aen_result = result;
4202 dev_warn(ctrl->device, "async event result %08x\n", result);
4209 dev_warn(ctrl->device, "resetting controller due to AER\n");
4216 u32 result = le32_to_cpu(res->u32);
4242 ctrl->aen_result = result;
4249 queue_work(nvme_wq, &ctrl->async_event_work);
4259 set->ops = ops;
4260 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
4261 if (ctrl->ops->flags & NVME_F_FABRICS)
4263 set->reserved_tags = 2;
4264 set->numa_node = ctrl->numa_node;
4265 set->flags = BLK_MQ_F_NO_SCHED;
4266 if (ctrl->ops->flags & NVME_F_BLOCKING)
4267 set->flags |= BLK_MQ_F_BLOCKING;
4268 set->cmd_size = cmd_size;
4269 set->driver_data = ctrl;
4270 set->nr_hw_queues = 1;
4271 set->timeout = NVME_ADMIN_TIMEOUT;
4276 ctrl->admin_q = blk_mq_init_queue(set);
4277 if (IS_ERR(ctrl->admin_q)) {
4278 ret = PTR_ERR(ctrl->admin_q);
4282 if (ctrl->ops->flags & NVME_F_FABRICS) {
4283 ctrl->fabrics_q = blk_mq_init_queue(set);
4284 if (IS_ERR(ctrl->fabrics_q)) {
4285 ret = PTR_ERR(ctrl->fabrics_q);
4290 ctrl->admin_tagset = set;
4294 blk_mq_destroy_queue(ctrl->admin_q);
4295 blk_put_queue(ctrl->admin_q);
4298 ctrl->admin_q = NULL;
4299 ctrl->fabrics_q = NULL;
4306 blk_mq_destroy_queue(ctrl->admin_q);
4307 blk_put_queue(ctrl->admin_q);
4308 if (ctrl->ops->flags & NVME_F_FABRICS) {
4309 blk_mq_destroy_queue(ctrl->fabrics_q);
4310 blk_put_queue(ctrl->fabrics_q);
4312 blk_mq_free_tag_set(ctrl->admin_tagset);
4323 set->ops = ops;
4324 set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
4329 if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
4330 set->reserved_tags = NVME_AQ_DEPTH;
4331 else if (ctrl->ops->flags & NVME_F_FABRICS)
4333 set->reserved_tags = 1;
4334 set->numa_node = ctrl->numa_node;
4335 set->flags = BLK_MQ_F_SHOULD_MERGE;
4336 if (ctrl->ops->flags & NVME_F_BLOCKING)
4337 set->flags |= BLK_MQ_F_BLOCKING;
4338 set->cmd_size = cmd_size;
4339 set->driver_data = ctrl;
4340 set->nr_hw_queues = ctrl->queue_count - 1;
4341 set->timeout = NVME_IO_TIMEOUT;
4342 set->nr_maps = nr_maps;
4347 if (ctrl->ops->flags & NVME_F_FABRICS) {
4348 ctrl->connect_q = blk_mq_init_queue(set);
4349 if (IS_ERR(ctrl->connect_q)) {
4350 ret = PTR_ERR(ctrl->connect_q);
4354 ctrl->connect_q);
4357 ctrl->tagset = set;
4362 ctrl->connect_q = NULL;
4369 if (ctrl->ops->flags & NVME_F_FABRICS) {
4370 blk_mq_destroy_queue(ctrl->connect_q);
4371 blk_put_queue(ctrl->connect_q);
4373 blk_mq_free_tag_set(ctrl->tagset);
4383 flush_work(&ctrl->async_event_work);
4384 cancel_work_sync(&ctrl->fw_act_work);
4385 if (ctrl->ops->stop_ctrl)
4386 ctrl->ops->stop_ctrl(ctrl);
4398 * to re-read the discovery log page to learn about possible changes
4402 if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
4406 if (ctrl->queue_count > 1) {
4413 set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
4420 nvme_fault_inject_fini(&ctrl->fault_inject);
4421 dev_pm_qos_hide_latency_tolerance(ctrl->device);
4422 cdev_device_del(&ctrl->cdev, ctrl->device);
4432 xa_for_each(&ctrl->cels, i, cel) {
4433 xa_erase(&ctrl->cels, i);
4437 xa_destroy(&ctrl->cels);
4444 struct nvme_subsystem *subsys = ctrl->subsys;
4446 if (!subsys || ctrl->instance != subsys->instance)
4447 ida_free(&nvme_instance_ida, ctrl->instance);
4451 cleanup_srcu_struct(&ctrl->srcu);
4454 __free_page(ctrl->discard_page);
4455 free_opal_dev(ctrl->opal_dev);
4459 list_del(&ctrl->subsys_entry);
4460 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4464 ctrl->ops->free_ctrl(ctrl);
4480 WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
4481 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
4482 spin_lock_init(&ctrl->lock);
4483 mutex_init(&ctrl->namespaces_lock);
4485 ret = init_srcu_struct(&ctrl->srcu);
4489 mutex_init(&ctrl->scan_lock);
4490 INIT_LIST_HEAD(&ctrl->namespaces);
4491 xa_init(&ctrl->cels);
4492 ctrl->dev = dev;
4493 ctrl->ops = ops;
4494 ctrl->quirks = quirks;
4495 ctrl->numa_node = NUMA_NO_NODE;
4496 INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4497 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4498 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4499 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4500 init_waitqueue_head(&ctrl->state_wq);
4502 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
4503 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
4504 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
4505 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
4509 ctrl->discard_page = alloc_page(GFP_KERNEL);
4510 if (!ctrl->discard_page) {
4511 ret = -ENOMEM;
4518 ctrl->instance = ret;
4520 device_initialize(&ctrl->ctrl_device);
4521 ctrl->device = &ctrl->ctrl_device;
4522 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
4523 ctrl->instance);
4524 ctrl->device->class = nvme_class;
4525 ctrl->device->parent = ctrl->dev;
4526 if (ops->dev_attr_groups)
4527 ctrl->device->groups = ops->dev_attr_groups;
4529 ctrl->device->groups = nvme_dev_attr_groups;
4530 ctrl->device->release = nvme_free_ctrl;
4531 dev_set_drvdata(ctrl->device, ctrl);
4532 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
4537 cdev_init(&ctrl->cdev, &nvme_dev_fops);
4538 ctrl->cdev.owner = ops->module;
4539 ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4547 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
4548 dev_pm_qos_update_user_latency_tolerance(ctrl->device,
4551 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
4559 nvme_fault_inject_fini(&ctrl->fault_inject);
4560 dev_pm_qos_hide_latency_tolerance(ctrl->device);
4561 cdev_device_del(&ctrl->cdev, ctrl->device);
4564 kfree_const(ctrl->device->kobj.name);
4566 ida_free(&nvme_instance_ida, ctrl->instance);
4568 if (ctrl->discard_page)
4569 __free_page(ctrl->discard_page);
4570 cleanup_srcu_struct(&ctrl->srcu);
4581 srcu_idx = srcu_read_lock(&ctrl->srcu);
4582 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4583 srcu_read_lock_held(&ctrl->srcu))
4584 blk_mark_disk_dead(ns->disk);
4585 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4594 srcu_idx = srcu_read_lock(&ctrl->srcu);
4595 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4596 srcu_read_lock_held(&ctrl->srcu))
4597 blk_mq_unfreeze_queue(ns->queue);
4598 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4599 clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
4608 srcu_idx = srcu_read_lock(&ctrl->srcu);
4609 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4610 srcu_read_lock_held(&ctrl->srcu)) {
4611 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
4615 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4625 srcu_idx = srcu_read_lock(&ctrl->srcu);
4626 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4627 srcu_read_lock_held(&ctrl->srcu))
4628 blk_mq_freeze_queue_wait(ns->queue);
4629 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4638 set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
4639 srcu_idx = srcu_read_lock(&ctrl->srcu);
4640 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4641 srcu_read_lock_held(&ctrl->srcu))
4642 blk_freeze_queue_start(ns->queue);
4643 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4649 if (!ctrl->tagset)
4651 if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags))
4652 blk_mq_quiesce_tagset(ctrl->tagset);
4654 blk_mq_wait_quiesce_done(ctrl->tagset);
4660 if (!ctrl->tagset)
4662 if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
4663 blk_mq_unquiesce_tagset(ctrl->tagset);
4669 if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
4670 blk_mq_quiesce_queue(ctrl->admin_q);
4672 blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
4678 if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
4679 blk_mq_unquiesce_queue(ctrl->admin_q);
4688 srcu_idx = srcu_read_lock(&ctrl->srcu);
4689 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4690 srcu_read_lock_held(&ctrl->srcu))
4691 blk_sync_queue(ns->queue);
4692 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4699 if (ctrl->admin_q)
4700 blk_sync_queue(ctrl->admin_q);
4706 if (file->f_op != &nvme_dev_fops)
4708 return file->private_data;
4746 int result = -ENOMEM;
4750 nvme_wq = alloc_workqueue("nvme-wq",
4755 nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
4760 nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
4775 nvme_class->dev_uevent = nvme_class_uevent;
4777 nvme_subsys_class = class_create("nvme-subsystem");
4784 "nvme-generic");
4788 nvme_ns_chr_class = class_create("nvme-generic");