Lines matching defs:ns: lines in the Linux NVMe host core driver (drivers/nvme/host/core.c) where the struct nvme_ns pointer ns is defined or used, listed by source line number.
311 struct nvme_ns *ns = req->q->queuedata;
314 if (ns) {
316 ns->disk ? ns->disk->disk_name : "?",
319 (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
320 (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
625 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
627 put_disk(ns->disk);
628 nvme_put_ns_head(ns->head);
629 nvme_put_ctrl(ns->ctrl);
630 kfree(ns);
633 bool nvme_get_ns(struct nvme_ns *ns)
635 return kref_get_unless_zero(&ns->kref);
638 void nvme_put_ns(struct nvme_ns *ns)
640 kref_put(&ns->kref, nvme_free_ns);
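
The three helpers above (lines 625-640) are the standard kref lifetime pattern for a namespace: the release callback runs only when the last reference is dropped, and kref_get_unless_zero() refuses to take a reference on an object that is already dying. A minimal self-contained sketch of the same pattern follows; the demo_ns type and demo_* helper names are illustrative, not the driver's own:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_ns {
        struct kref kref;               /* kref_init() is done at allocation time */
        struct list_head list;          /* links the ns on its controller's list  */
        unsigned long flags;            /* READY/REMOVING style state bits        */
        u32 nsid;
};

static void demo_ns_free(struct kref *kref)
{
        struct demo_ns *ns = container_of(kref, struct demo_ns, kref);

        kfree(ns);                      /* runs exactly once, when the count hits zero */
}

static bool demo_ns_get(struct demo_ns *ns)
{
        /* fails once the count is already zero, so a dying ns is never revived */
        return kref_get_unless_zero(&ns->kref);
}

static void demo_ns_put(struct demo_ns *ns)
{
        kref_put(&ns->kref, demo_ns_free);
}

The later sketches below reuse demo_ns and these helpers.
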
737 static inline void nvme_setup_flush(struct nvme_ns *ns,
742 cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
745 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
766 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
769 range = page_address(ns->ctrl->discard_page);
773 u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
774 u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
782 u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
783 u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
795 if (virt_to_page(range) == ns->ctrl->discard_page)
796 clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
804 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
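
Lines 766-796 above show how the discard path always has a buffer for its DSM ranges: a page is pre-allocated per controller and claimed with a bit lock, with kmalloc() as the fallback, so a discard can still be built under memory pressure. A hedged sketch of that reserve-and-release pattern; the demo_* function names and parameter layout are illustrative, not the driver's:

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Claim a buffer for discard ranges: prefer the reserved page, else allocate. */
static void *demo_claim_range_buffer(struct page *reserved_page,
                                     unsigned long *busy, size_t size)
{
        if (test_and_set_bit_lock(0, busy))
                /* reserved page already in use by another discard */
                return kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
        return page_address(reserved_page);
}

/* Release the buffer: unlock the reserved page or free the allocation. */
static void demo_release_range_buffer(struct page *reserved_page,
                                      unsigned long *busy, void *buf)
{
        if (virt_to_page(buf) == reserved_page)
                clear_bit_unlock(0, busy);
        else
                kfree(buf);
}

Per bio, the range's starting LBA comes from nvme_sect_to_lba() and its block count from bi_size >> lba_shift, as visible at lines 782-783.
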
814 static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
821 switch (ns->guard_type) {
838 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
843 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
844 return nvme_setup_discard(ns, req, cmnd);
847 cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
849 cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
851 cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
853 if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC))
856 if (nvme_ns_has_pi(ns)) {
859 switch (ns->pi_type) {
862 nvme_set_ref_tag(ns, cmnd, req);
870 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
887 cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
891 cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
892 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
897 if (ns->ms) {
905 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
910 switch (ns->pi_type) {
920 nvme_set_ref_tag(ns, cmnd, req);
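
Most of nvme_setup_rw() (lines 870-920) is unit conversion: the request's starting 512-byte sector becomes a device LBA via nvme_sect_to_lba(), and the byte count becomes a zero-based block count, since NVMe encodes "number of logical blocks" as n-1 (line 892). A small illustrative sketch of just that arithmetic; the demo_* names are not the driver's:

#include <linux/types.h>

/*
 * lba_shift is log2 of the namespace's logical block size; 9 is the block
 * layer's fixed 512-byte sector shift (SECTOR_SHIFT).
 */
static inline void demo_rw_fields(unsigned int lba_shift, sector_t pos,
                                  unsigned int bytes, u64 *slba, u16 *length)
{
        *slba   = pos >> (lba_shift - 9);       /* 512B sectors -> LBAs        */
        *length = (bytes >> lba_shift) - 1;     /* logical blocks, zero-based:
                                                 * 0 means one block; the block
                                                 * layer's request size limits
                                                 * keep this within 16 bits    */
}
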
944 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
958 nvme_setup_flush(ns, cmd);
962 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
965 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
968 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
971 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
974 ret = nvme_setup_write_zeroes(ns, req, cmd);
977 ret = nvme_setup_discard(ns, req, cmd);
980 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
983 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
986 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
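
nvme_setup_cmd() (lines 944-986) is the single dispatch point that turns a block-layer request into an NVMe command: the request's operation selects which setup helper fills in the command. Below is a condensed sketch of that dispatch; the case labels are reconstructed from the operation names, and the real function additionally initializes the command, handles pre-built passthrough commands, and covers REQ_OP_ZONE_RESET_ALL, so treat this as an outline rather than the exact code:

static blk_status_t demo_setup_cmd_dispatch(struct nvme_ns *ns,
                                            struct request *req,
                                            struct nvme_command *cmd)
{
        blk_status_t ret = BLK_STS_OK;

        switch (req_op(req)) {
        case REQ_OP_FLUSH:
                nvme_setup_flush(ns, cmd);
                break;
        case REQ_OP_ZONE_RESET:
                ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
                break;
        case REQ_OP_ZONE_OPEN:
                ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
                break;
        case REQ_OP_ZONE_CLOSE:
                ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
                break;
        case REQ_OP_ZONE_FINISH:
                ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
                break;
        case REQ_OP_WRITE_ZEROES:
                ret = nvme_setup_write_zeroes(ns, req, cmd);
                break;
        case REQ_OP_DISCARD:
                ret = nvme_setup_discard(ns, req, cmd);
                break;
        case REQ_OP_READ:
                ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
                break;
        case REQ_OP_WRITE:
                ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
                break;
        case REQ_OP_ZONE_APPEND:
                ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }
        return ret;
}
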
1062 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
1066 if (ns) {
1067 effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
1087 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
1089 u32 effects = nvme_command_effects(ctrl, ns, opcode);
1107 void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
1127 if (ns)
1601 static int nvme_ns_open(struct nvme_ns *ns)
1605 if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
1607 if (!nvme_get_ns(ns))
1609 if (!try_module_get(ns->ctrl->ops->module))
1615 nvme_put_ns(ns);
1620 static void nvme_ns_release(struct nvme_ns *ns)
1623 module_put(ns->ctrl->ops->module);
1624 nvme_put_ns(ns);
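
nvme_ns_open()/nvme_ns_release() (lines 1601-1624) take two references in a fixed order, the namespace kref first and then the owning transport module, and drop them in reverse, so the module cannot be unloaded while an opener still holds the ns. A minimal sketch of that pairing, reusing the demo_ns helpers from the lifetime sketch above; the owner parameter is an assumption for illustration:

#include <linux/errno.h>
#include <linux/module.h>

static int demo_ns_open(struct demo_ns *ns, struct module *owner)
{
        if (!demo_ns_get(ns))
                return -ENXIO;          /* ns already being torn down */
        if (!try_module_get(owner)) {
                demo_ns_put(ns);        /* undo in reverse order on failure */
                return -ENXIO;
        }
        return 0;
}

static void demo_ns_release(struct demo_ns *ns, struct module *owner)
{
        module_put(owner);
        demo_ns_put(ns);
}
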
1647 static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
1652 switch (ns->pi_type) {
1654 switch (ns->guard_type) {
1672 switch (ns->guard_type) {
1693 integrity.tuple_size = ns->ms;
1698 static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
1704 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1706 struct nvme_ctrl *ctrl = ns->ctrl;
1710 if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
1711 ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
1742 static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
1746 struct nvme_ctrl *ctrl = ns->ctrl;
1752 ns->pi_size = 0;
1753 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
1755 ns->pi_size = sizeof(struct t10_pi_tuple);
1756 ns->guard_type = NVME_NVM_NS_16B_GUARD;
1765 c.identify.nsid = cpu_to_le32(ns->head->ns_id);
1769 ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
1779 ns->guard_type = nvme_elbaf_guard_type(elbaf);
1780 switch (ns->guard_type) {
1782 ns->pi_size = sizeof(struct crc64_pi_tuple);
1785 ns->pi_size = sizeof(struct t10_pi_tuple);
1794 if (ns->pi_size && (first || ns->ms == ns->pi_size))
1795 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1797 ns->pi_type = 0;
1802 static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
1804 struct nvme_ctrl *ctrl = ns->ctrl;
1807 ret = nvme_init_ms(ns, id);
1811 ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
1812 if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1824 ns->features |= NVME_NS_EXT_LBAS;
1835 if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
1836 ns->features |= NVME_NS_METADATA_SUPPORTED;
1845 ns->features |= NVME_NS_EXT_LBAS;
1847 ns->features |= NVME_NS_METADATA_SUPPORTED;
1871 struct nvme_ns *ns, struct nvme_id_ns *id)
1873 sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
1874 u32 bs = 1U << ns->lba_shift;
1882 if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) {
1899 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
1925 if (ns->ms) {
1927 (ns->features & NVME_NS_METADATA_SUPPORTED))
1928 nvme_init_integrity(disk, ns,
1929 ns->ctrl->max_integrity_segments);
1930 else if (!nvme_ns_has_pi(ns))
1936 nvme_config_discard(disk, ns);
1938 ns->ctrl->max_zeroes_sectors);
1941 static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
1943 return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
1952 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
1954 struct nvme_ctrl *ctrl = ns->ctrl;
1961 iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
1967 if (nvme_first_scan(ns->disk))
1969 ns->disk->disk_name, iob);
1973 if (blk_queue_is_zoned(ns->disk->queue)) {
1974 if (nvme_first_scan(ns->disk))
1976 ns->disk->disk_name);
1980 blk_queue_chunk_sectors(ns->queue, iob);
1983 static int nvme_update_ns_info_generic(struct nvme_ns *ns,
1986 blk_mq_freeze_queue(ns->disk->queue);
1987 nvme_set_queue_limits(ns->ctrl, ns->queue);
1988 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
1989 blk_mq_unfreeze_queue(ns->disk->queue);
1991 if (nvme_ns_head_multipath(ns->head)) {
1992 blk_mq_freeze_queue(ns->head->disk->queue);
1993 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
1994 nvme_mpath_revalidate_paths(ns);
1995 blk_stack_limits(&ns->head->disk->queue->limits,
1996 &ns->queue->limits, 0);
1997 ns->head->disk->flags |= GENHD_FL_HIDDEN;
1998 blk_mq_unfreeze_queue(ns->head->disk->queue);
2002 ns->disk->flags |= GENHD_FL_HIDDEN;
2003 set_bit(NVME_NS_READY, &ns->flags);
2008 static int nvme_update_ns_info_block(struct nvme_ns *ns,
2015 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
2026 blk_mq_freeze_queue(ns->disk->queue);
2028 ns->lba_shift = id->lbaf[lbaf].ds;
2029 nvme_set_queue_limits(ns->ctrl, ns->queue);
2031 ret = nvme_configure_metadata(ns, id);
2033 blk_mq_unfreeze_queue(ns->disk->queue);
2036 nvme_set_chunk_sectors(ns, id);
2037 nvme_update_disk_info(ns->disk, ns, id);
2039 if (ns->head->ids.csi == NVME_CSI_ZNS) {
2040 ret = nvme_update_zone_info(ns, lbaf);
2042 blk_mq_unfreeze_queue(ns->disk->queue);
2054 ns->features |= NVME_NS_DEAC;
2055 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
2056 set_bit(NVME_NS_READY, &ns->flags);
2057 blk_mq_unfreeze_queue(ns->disk->queue);
2059 if (blk_queue_is_zoned(ns->queue)) {
2060 ret = nvme_revalidate_zones(ns);
2061 if (ret && !nvme_first_scan(ns->disk))
2065 if (nvme_ns_head_multipath(ns->head)) {
2066 blk_mq_freeze_queue(ns->head->disk->queue);
2067 nvme_update_disk_info(ns->head->disk, ns, id);
2068 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
2069 nvme_mpath_revalidate_paths(ns);
2070 blk_stack_limits(&ns->head->disk->queue->limits,
2071 &ns->queue->limits, 0);
2072 disk_update_readahead(ns->head->disk);
2073 blk_mq_unfreeze_queue(ns->head->disk->queue);
2083 ns->disk->flags |= GENHD_FL_HIDDEN;
2084 set_bit(NVME_NS_READY, &ns->flags);
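
Both update paths above (nvme_update_ns_info_generic() at lines 1983-2003 and nvme_update_ns_info_block() at lines 2008-2084) wrap their limit, capacity and read-only changes in a blk-mq queue freeze so no request observes half-updated geometry, and then repeat the update under a separate freeze of the multipath head disk. A minimal sketch of that freeze-update-unfreeze shape for a generic disk; it illustrates the pattern and is not the driver's update routine:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static void demo_update_disk_geometry(struct gendisk *disk,
                                      sector_t new_capacity, bool read_only)
{
        blk_mq_freeze_queue(disk->queue);       /* drain and block new requests */
        set_capacity(disk, new_capacity);       /* the driver also updates queue
                                                 * limits, integrity, discard... */
        set_disk_ro(disk, read_only);
        blk_mq_unfreeze_queue(disk->queue);     /* let I/O flow again */
}
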
2093 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
2098 dev_info(ns->ctrl->device,
2101 return nvme_update_ns_info_generic(ns, info);
2103 return nvme_update_ns_info_block(ns, info);
2105 return nvme_update_ns_info_block(ns, info);
2107 dev_info(ns->ctrl->device,
2110 return nvme_update_ns_info_generic(ns, info);
3373 static int nvme_add_ns_cdev(struct nvme_ns *ns)
3377 ns->cdev_device.parent = ns->ctrl->device;
3378 ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
3379 ns->ctrl->instance, ns->head->instance);
3383 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
3384 ns->ctrl->ops->module);
3469 static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
3471 struct nvme_ctrl *ctrl = ns->ctrl;
3494 if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
3495 ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
3552 list_add_tail_rcu(&ns->siblings, &head->list);
3553 ns->head = head;
3566 struct nvme_ns *ns, *ret = NULL;
3570 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
3572 if (ns->head->ns_id == nsid) {
3573 if (!nvme_get_ns(ns))
3575 ret = ns;
3578 if (ns->head->ns_id > nsid)
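
Namespace lookup (lines 3566-3578) walks the controller's namespace list under SRCU and only returns an entry whose reference could actually be taken, so a namespace that is concurrently being removed is treated as absent; the early break at line 3578 works because the list is kept sorted by nsid. A self-contained sketch of that walk, reusing demo_ns from above; demo_ctrl and its fields are assumptions that mirror the shape of the driver's controller structure:

#include <linux/rculist.h>
#include <linux/srcu.h>

struct demo_ctrl {
        struct list_head namespaces;    /* demo_ns entries, sorted by nsid */
        struct srcu_struct srcu;        /* protects readers of the list    */
};

static struct demo_ns *demo_find_get_ns(struct demo_ctrl *ctrl, u32 nsid)
{
        struct demo_ns *ns, *found = NULL;
        int idx;

        idx = srcu_read_lock(&ctrl->srcu);
        list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
                                 srcu_read_lock_held(&ctrl->srcu)) {
                if (ns->nsid == nsid) {
                        if (!demo_ns_get(ns))   /* dying namespace: skip it */
                                continue;
                        found = ns;
                        break;
                }
                if (ns->nsid > nsid)            /* sorted list: nsid not present */
                        break;
        }
        srcu_read_unlock(&ctrl->srcu, idx);
        return found;
}
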
3589 static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
3593 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
3594 if (tmp->head->ns_id < ns->head->ns_id) {
3595 list_add_rcu(&ns->list, &tmp->list);
3599 list_add(&ns->list, &ns->ctrl->namespaces);
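
nvme_ns_add_to_ctrl_list() (lines 3589-3599) keeps the controller's namespace list sorted by nsid by walking it backwards, which makes appending the highest nsid cheap and enables the early break in the lookup above. A short sketch of the same insertion, reusing the demo_ctrl/demo_ns types; using list_add_rcu() for the head insertion as well is a choice of this sketch:

static void demo_ns_add_sorted(struct demo_ctrl *ctrl, struct demo_ns *new)
{
        struct demo_ns *tmp;

        /* walk backwards: new namespaces usually have the largest nsid */
        list_for_each_entry_reverse(tmp, &ctrl->namespaces, list) {
                if (tmp->nsid < new->nsid) {
                        list_add_rcu(&new->list, &tmp->list);
                        return;
                }
        }
        /* smaller than every existing nsid, or the list is empty */
        list_add_rcu(&new->list, &ctrl->namespaces);
}
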
3604 struct nvme_ns *ns;
3608 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3609 if (!ns)
3612 disk = blk_mq_alloc_disk(ctrl->tagset, ns);
3616 disk->private_data = ns;
3618 ns->disk = disk;
3619 ns->queue = disk->queue;
3622 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
3624 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3627 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3629 ns->ctrl = ctrl;
3630 kref_init(&ns->kref);
3632 if (nvme_init_ns_head(ns, info))
3646 if (nvme_ns_head_multipath(ns->head)) {
3648 ctrl->instance, ns->head->instance);
3652 ns->head->instance);
3655 ns->head->instance);
3658 if (nvme_update_ns_info(ns, info))
3670 nvme_ns_add_to_ctrl_list(ns);
3675 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
3678 if (!nvme_ns_head_multipath(ns->head))
3679 nvme_add_ns_cdev(ns);
3681 nvme_mpath_add_disk(ns, info->anagrpid);
3682 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3689 list_del_rcu(&ns->list);
3694 list_del_rcu(&ns->siblings);
3695 if (list_empty(&ns->head->list))
3696 list_del_init(&ns->head->entry);
3698 nvme_put_ns_head(ns->head);
3702 kfree(ns);
3705 static void nvme_ns_remove(struct nvme_ns *ns)
3709 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3712 clear_bit(NVME_NS_READY, &ns->flags);
3713 set_capacity(ns->disk, 0);
3714 nvme_fault_inject_fini(&ns->fault_inject);
3718 * this ns going back into current_path.
3720 synchronize_srcu(&ns->head->srcu);
3723 if (nvme_mpath_clear_current_path(ns))
3724 synchronize_srcu(&ns->head->srcu);
3726 mutex_lock(&ns->ctrl->subsys->lock);
3727 list_del_rcu(&ns->siblings);
3728 if (list_empty(&ns->head->list)) {
3729 list_del_init(&ns->head->entry);
3732 mutex_unlock(&ns->ctrl->subsys->lock);
3735 synchronize_srcu(&ns->head->srcu);
3737 if (!nvme_ns_head_multipath(ns->head))
3738 nvme_cdev_del(&ns->cdev, &ns->cdev_device);
3739 del_gendisk(ns->disk);
3741 mutex_lock(&ns->ctrl->namespaces_lock);
3742 list_del_rcu(&ns->list);
3743 mutex_unlock(&ns->ctrl->namespaces_lock);
3744 synchronize_srcu(&ns->ctrl->srcu);
3747 nvme_mpath_shutdown_disk(ns->head);
3748 nvme_put_ns(ns);
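
nvme_ns_remove() (lines 3705-3748) is made idempotent at the top: test_and_set_bit() on the REMOVING flag lets only the first caller proceed, NVME_NS_READY is cleared before any teardown so new submissions bail out, and SRCU grace periods separate unlinking from freeing. A minimal sketch of just that entry guard, reusing demo_ns; the DEMO_* flag bits are illustrative stand-ins for the driver's NVME_NS_* flags:

#include <linux/bitops.h>

enum {
        DEMO_NS_READY,          /* I/O may be queued against this namespace */
        DEMO_NS_REMOVING,       /* teardown has started; set at most once   */
};

static void demo_ns_remove(struct demo_ns *ns)
{
        if (test_and_set_bit(DEMO_NS_REMOVING, &ns->flags))
                return;                 /* removal already in progress */

        clear_bit(DEMO_NS_READY, &ns->flags);
        /*
         * The driver then unlinks the ns from the subsystem and controller
         * lists, waits for SRCU readers, deletes the gendisk and char device,
         * and finally drops the reference the list held.
         */
        demo_ns_put(ns);
}
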
3753 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
3755 if (ns) {
3756 nvme_ns_remove(ns);
3757 nvme_put_ns(ns);
3761 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
3765 if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
3766 dev_err(ns->ctrl->device,
3767 "identifiers changed for nsid %d\n", ns->head->ns_id);
3771 ret = nvme_update_ns_info(ns, info);
3780 nvme_ns_remove(ns);
3786 struct nvme_ns *ns;
3819 ns = nvme_find_get_ns(ctrl, nsid);
3820 if (ns) {
3821 nvme_validate_ns(ns, &info);
3822 nvme_put_ns(ns);
3831 struct nvme_ns *ns, *next;
3835 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3836 if (ns->head->ns_id > nsid) {
3837 list_del_rcu(&ns->list);
3839 list_add_tail_rcu(&ns->list, &rm_list);
3844 list_for_each_entry_safe(ns, next, &rm_list, list)
3845 nvme_ns_remove(ns);
3927 "reading changed ns log failed: %d\n", error);
3947 * namespace. Hence re-read the limits at the time of ns allocation.
3984 struct nvme_ns *ns, *next;
4000 /* prevent racing with ns scanning */
4020 list_for_each_entry_safe(ns, next, &ns_list, list)
4021 nvme_ns_remove(ns);
4578 struct nvme_ns *ns;
4582 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4584 blk_mark_disk_dead(ns->disk);
4591 struct nvme_ns *ns;
4595 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4597 blk_mq_unfreeze_queue(ns->queue);
4605 struct nvme_ns *ns;
4609 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4611 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
4622 struct nvme_ns *ns;
4626 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4628 blk_mq_freeze_queue_wait(ns->queue);
4635 struct nvme_ns *ns;
4640 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4642 blk_freeze_queue_start(ns->queue);
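
The list walks above (lines 4578-4642) apply one blk-mq operation to every namespace queue on the controller under the controller's SRCU read lock: marking disks dead, unfreezing, waiting for a freeze (with or without a timeout), and starting a freeze. In core.c these bodies belong to nvme_unfreeze(), nvme_wait_freeze_timeout(), nvme_wait_freeze() and nvme_start_freeze(); a hypothetical caller pairing them around a controller reset might look like the sketch below (the real ordering lives in the transport drivers and can differ, for example timed waits during surprise removal):

static void demo_reset_with_frozen_queues(struct nvme_ctrl *ctrl)
{
        nvme_start_freeze(ctrl);        /* blk_freeze_queue_start() on each ns queue */
        nvme_wait_freeze(ctrl);         /* blk_mq_freeze_queue_wait() on each        */

        /* ... tear down and re-create the I/O queues here ... */

        nvme_unfreeze(ctrl);            /* blk_mq_unfreeze_queue() on each           */
}
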
4685 struct nvme_ns *ns;
4689 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4691 blk_sync_queue(ns->queue);