Lines Matching +full:supports +full:- +full:cqe

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
10 #include <linux/pci-p2pdma.h>
35 * - subsystems list
36 * - per-subsystem allowed hosts list
37 * - allow_any_host subsystem attribute
38 * - nvmet_genctr
39 * - the nvmet_transports array
42 * while when reading (populating discovery log page or checking host-subsystem
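The comment excerpted above describes the read/write locking rule for the target's configuration data: writers take the lock exclusively when changing the subsystems list, allowed-hosts lists, allow_any_host, nvmet_genctr or the transports array, while readers (discovery log population, host-subsystem link checks) may run concurrently. A minimal sketch of that pattern, assuming the rw_semaphore is the nvmet_config_sem declared elsewhere in this file (it is not among the matched lines), with hypothetical example_* callers:

#include <linux/rwsem.h>

extern struct rw_semaphore nvmet_config_sem;

static void example_config_update(void)
{
	down_write(&nvmet_config_sem);	/* writer: modifying the protected lists/structures */
	/* ... update subsystems list, allowed hosts, nvmet_genctr, transports ... */
	up_write(&nvmet_config_sem);
}

static void example_disc_log_read(void)
{
	down_read(&nvmet_config_sem);	/* reader: populate discovery log page, check links */
	/* ... concurrent readers are allowed ... */
	up_read(&nvmet_config_sem);
}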
56 case -ENOSPC: in errno_to_nvme_status()
57 req->error_loc = offsetof(struct nvme_rw_command, length); in errno_to_nvme_status()
59 case -EREMOTEIO: in errno_to_nvme_status()
60 req->error_loc = offsetof(struct nvme_rw_command, slba); in errno_to_nvme_status()
62 case -EOPNOTSUPP: in errno_to_nvme_status()
63 req->error_loc = offsetof(struct nvme_common_command, opcode); in errno_to_nvme_status()
64 switch (req->cmd->common.opcode) { in errno_to_nvme_status()
72 case -ENODATA: in errno_to_nvme_status()
73 req->error_loc = offsetof(struct nvme_rw_command, nsid); in errno_to_nvme_status()
75 case -EIO: in errno_to_nvme_status()
78 req->error_loc = offsetof(struct nvme_common_command, opcode); in errno_to_nvme_status()
85 pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode, in nvmet_report_invalid_opcode()
86 req->sq->qid); in nvmet_report_invalid_opcode()
88 req->error_loc = offsetof(struct nvme_common_command, opcode); in nvmet_report_invalid_opcode()
98 if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { in nvmet_copy_to_sgl()
99 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_copy_to_sgl()
107 if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { in nvmet_copy_from_sgl()
108 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_copy_from_sgl()
116 if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) { in nvmet_zero_sgl()
117 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_zero_sgl()
129 xa_for_each(&subsys->namespaces, idx, cur) in nvmet_max_nsid()
130 nsid = cur->nsid; in nvmet_max_nsid()
137 return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16); in nvmet_async_event_result()
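nvmet_async_event_result() above packs the asynchronous event completion result dword in the layout defined by the NVMe spec: bits 2:0 event type, bits 15:8 event information, bits 23:16 log page identifier. A small standalone example of the same packing; the constant values are assumptions taken from nvme.h (NVME_AER_TYPE_NOTICE, NVME_AER_NOTICE_NS_CHANGED, NVME_LOG_CHANGED_NS), not from the matches above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t event_type = 0x2;	/* assumed: NVME_AER_TYPE_NOTICE */
	uint32_t event_info = 0x0;	/* assumed: NVME_AER_NOTICE_NS_CHANGED */
	uint32_t log_page   = 0x4;	/* assumed: NVME_LOG_CHANGED_NS */
	uint32_t result = event_type | (event_info << 8) | (log_page << 16);

	printf("cqe result dword0 = 0x%08x\n", result);	/* prints 0x00040002 */
	return 0;
}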
144 mutex_lock(&ctrl->lock); in nvmet_async_events_failall()
145 while (ctrl->nr_async_event_cmds) { in nvmet_async_events_failall()
146 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_events_failall()
147 mutex_unlock(&ctrl->lock); in nvmet_async_events_failall()
149 mutex_lock(&ctrl->lock); in nvmet_async_events_failall()
151 mutex_unlock(&ctrl->lock); in nvmet_async_events_failall()
159 mutex_lock(&ctrl->lock); in nvmet_async_events_process()
160 while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) { in nvmet_async_events_process()
161 aen = list_first_entry(&ctrl->async_events, in nvmet_async_events_process()
163 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_events_process()
166 list_del(&aen->entry); in nvmet_async_events_process()
169 mutex_unlock(&ctrl->lock); in nvmet_async_events_process()
170 trace_nvmet_async_event(ctrl, req->cqe->result.u32); in nvmet_async_events_process()
172 mutex_lock(&ctrl->lock); in nvmet_async_events_process()
174 mutex_unlock(&ctrl->lock); in nvmet_async_events_process()
181 mutex_lock(&ctrl->lock); in nvmet_async_events_free()
182 list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) { in nvmet_async_events_free()
183 list_del(&aen->entry); in nvmet_async_events_free()
186 mutex_unlock(&ctrl->lock); in nvmet_async_events_free()
206 aen->event_type = event_type; in nvmet_add_async_event()
207 aen->event_info = event_info; in nvmet_add_async_event()
208 aen->log_page = log_page; in nvmet_add_async_event()
210 mutex_lock(&ctrl->lock); in nvmet_add_async_event()
211 list_add_tail(&aen->entry, &ctrl->async_events); in nvmet_add_async_event()
212 mutex_unlock(&ctrl->lock); in nvmet_add_async_event()
214 queue_work(nvmet_wq, &ctrl->async_event_work); in nvmet_add_async_event()
221 mutex_lock(&ctrl->lock); in nvmet_add_to_changed_ns_log()
222 if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES) in nvmet_add_to_changed_ns_log()
225 for (i = 0; i < ctrl->nr_changed_ns; i++) { in nvmet_add_to_changed_ns_log()
226 if (ctrl->changed_ns_list[i] == nsid) in nvmet_add_to_changed_ns_log()
230 if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) { in nvmet_add_to_changed_ns_log()
231 ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff); in nvmet_add_to_changed_ns_log()
232 ctrl->nr_changed_ns = U32_MAX; in nvmet_add_to_changed_ns_log()
236 ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid; in nvmet_add_to_changed_ns_log()
238 mutex_unlock(&ctrl->lock); in nvmet_add_to_changed_ns_log()
245 lockdep_assert_held(&subsys->lock); in nvmet_ns_changed()
247 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_ns_changed()
262 mutex_lock(&subsys->lock); in nvmet_send_ana_event()
263 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_send_ana_event()
264 if (port && ctrl->port != port) in nvmet_send_ana_event()
271 mutex_unlock(&subsys->lock); in nvmet_send_ana_event()
279 list_for_each_entry(p, &port->subsystems, entry) in nvmet_port_send_ana_event()
280 nvmet_send_ana_event(p->subsys, port); in nvmet_port_send_ana_event()
289 if (nvmet_transports[ops->type]) in nvmet_register_transport()
290 ret = -EINVAL; in nvmet_register_transport()
292 nvmet_transports[ops->type] = ops; in nvmet_register_transport()
302 nvmet_transports[ops->type] = NULL; in nvmet_unregister_transport()
311 mutex_lock(&subsys->lock); in nvmet_port_del_ctrls()
312 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_port_del_ctrls()
313 if (ctrl->port == port) in nvmet_port_del_ctrls()
314 ctrl->ops->delete_ctrl(ctrl); in nvmet_port_del_ctrls()
316 mutex_unlock(&subsys->lock); in nvmet_port_del_ctrls()
326 ops = nvmet_transports[port->disc_addr.trtype]; in nvmet_enable_port()
329 request_module("nvmet-transport-%d", port->disc_addr.trtype); in nvmet_enable_port()
331 ops = nvmet_transports[port->disc_addr.trtype]; in nvmet_enable_port()
334 port->disc_addr.trtype); in nvmet_enable_port()
335 return -EINVAL; in nvmet_enable_port()
339 if (!try_module_get(ops->owner)) in nvmet_enable_port()
340 return -EINVAL; in nvmet_enable_port()
346 if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) { in nvmet_enable_port()
347 pr_err("T10-PI is not supported by transport type %d\n", in nvmet_enable_port()
348 port->disc_addr.trtype); in nvmet_enable_port()
349 ret = -EINVAL; in nvmet_enable_port()
353 ret = ops->add_port(port); in nvmet_enable_port()
358 if (port->inline_data_size < 0) in nvmet_enable_port()
359 port->inline_data_size = 0; in nvmet_enable_port()
361 port->enabled = true; in nvmet_enable_port()
362 port->tr_ops = ops; in nvmet_enable_port()
366 module_put(ops->owner); in nvmet_enable_port()
376 port->enabled = false; in nvmet_disable_port()
377 port->tr_ops = NULL; in nvmet_disable_port()
379 ops = nvmet_transports[port->disc_addr.trtype]; in nvmet_disable_port()
380 ops->remove_port(port); in nvmet_disable_port()
381 module_put(ops->owner); in nvmet_disable_port()
388 bool reset_tbkas = ctrl->reset_tbkas; in nvmet_keep_alive_timer()
390 ctrl->reset_tbkas = false; in nvmet_keep_alive_timer()
392 pr_debug("ctrl %d reschedule traffic based keep-alive timer\n", in nvmet_keep_alive_timer()
393 ctrl->cntlid); in nvmet_keep_alive_timer()
394 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_keep_alive_timer()
398 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", in nvmet_keep_alive_timer()
399 ctrl->cntlid, ctrl->kato); in nvmet_keep_alive_timer()
406 if (unlikely(ctrl->kato == 0)) in nvmet_start_keep_alive_timer()
409 pr_debug("ctrl %d start keep-alive timer for %d secs\n", in nvmet_start_keep_alive_timer()
410 ctrl->cntlid, ctrl->kato); in nvmet_start_keep_alive_timer()
412 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_start_keep_alive_timer()
417 if (unlikely(ctrl->kato == 0)) in nvmet_stop_keep_alive_timer()
420 pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); in nvmet_stop_keep_alive_timer()
422 cancel_delayed_work_sync(&ctrl->ka_work); in nvmet_stop_keep_alive_timer()
427 u32 nsid = le32_to_cpu(req->cmd->common.nsid); in nvmet_req_find_ns()
430 req->ns = xa_load(&subsys->namespaces, nsid); in nvmet_req_find_ns()
431 if (unlikely(!req->ns)) { in nvmet_req_find_ns()
432 req->error_loc = offsetof(struct nvme_common_command, nsid); in nvmet_req_find_ns()
438 percpu_ref_get(&req->ns->ref); in nvmet_req_find_ns()
446 complete(&ns->disable_done); in nvmet_destroy_namespace()
451 percpu_ref_put(&ns->ref); in nvmet_put_namespace()
465 if (!ns->use_p2pmem) in nvmet_p2pmem_ns_enable()
468 if (!ns->bdev) { in nvmet_p2pmem_ns_enable()
469 pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n"); in nvmet_p2pmem_ns_enable()
470 return -EINVAL; in nvmet_p2pmem_ns_enable()
473 if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) { in nvmet_p2pmem_ns_enable()
474 pr_err("peer-to-peer DMA is not supported by the driver of %s\n", in nvmet_p2pmem_ns_enable()
475 ns->device_path); in nvmet_p2pmem_ns_enable()
476 return -EINVAL; in nvmet_p2pmem_ns_enable()
479 if (ns->p2p_dev) { in nvmet_p2pmem_ns_enable()
480 ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true); in nvmet_p2pmem_ns_enable()
482 return -EINVAL; in nvmet_p2pmem_ns_enable()
493 pr_err("no peer-to-peer memory is available for %s\n", in nvmet_p2pmem_ns_enable()
494 ns->device_path); in nvmet_p2pmem_ns_enable()
495 return -EINVAL; in nvmet_p2pmem_ns_enable()
505 * Note: ctrl->subsys->lock should be held when calling this function
514 if (!ctrl->p2p_client || !ns->use_p2pmem) in nvmet_p2pmem_ns_add_p2p()
517 if (ns->p2p_dev) { in nvmet_p2pmem_ns_add_p2p()
518 ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true); in nvmet_p2pmem_ns_add_p2p()
522 p2p_dev = pci_dev_get(ns->p2p_dev); in nvmet_p2pmem_ns_add_p2p()
524 clients[0] = ctrl->p2p_client; in nvmet_p2pmem_ns_add_p2p()
529 pr_err("no peer-to-peer memory is available that's supported by %s and %s\n", in nvmet_p2pmem_ns_add_p2p()
530 dev_name(ctrl->p2p_client), ns->device_path); in nvmet_p2pmem_ns_add_p2p()
535 ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev); in nvmet_p2pmem_ns_add_p2p()
540 ns->nsid); in nvmet_p2pmem_ns_add_p2p()
545 loff_t oldsize = ns->size; in nvmet_ns_revalidate()
547 if (ns->bdev) in nvmet_ns_revalidate()
552 return oldsize != ns->size; in nvmet_ns_revalidate()
557 struct nvmet_subsys *subsys = ns->subsys; in nvmet_ns_enable()
561 mutex_lock(&subsys->lock); in nvmet_ns_enable()
569 if (ns->enabled) in nvmet_ns_enable()
572 ret = -EMFILE; in nvmet_ns_enable()
573 if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES) in nvmet_ns_enable()
577 if (ret == -ENOTBLK) in nvmet_ns_enable()
586 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_enable()
589 ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace, in nvmet_ns_enable()
594 if (ns->nsid > subsys->max_nsid) in nvmet_ns_enable()
595 subsys->max_nsid = ns->nsid; in nvmet_ns_enable()
597 ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL); in nvmet_ns_enable()
601 subsys->nr_namespaces++; in nvmet_ns_enable()
603 nvmet_ns_changed(subsys, ns->nsid); in nvmet_ns_enable()
604 ns->enabled = true; in nvmet_ns_enable()
607 mutex_unlock(&subsys->lock); in nvmet_ns_enable()
611 subsys->max_nsid = nvmet_max_nsid(subsys); in nvmet_ns_enable()
612 percpu_ref_exit(&ns->ref); in nvmet_ns_enable()
614 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_enable()
615 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); in nvmet_ns_enable()
623 struct nvmet_subsys *subsys = ns->subsys; in nvmet_ns_disable()
626 mutex_lock(&subsys->lock); in nvmet_ns_disable()
627 if (!ns->enabled) in nvmet_ns_disable()
630 ns->enabled = false; in nvmet_ns_disable()
631 xa_erase(&ns->subsys->namespaces, ns->nsid); in nvmet_ns_disable()
632 if (ns->nsid == subsys->max_nsid) in nvmet_ns_disable()
633 subsys->max_nsid = nvmet_max_nsid(subsys); in nvmet_ns_disable()
635 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_disable()
636 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); in nvmet_ns_disable()
638 mutex_unlock(&subsys->lock); in nvmet_ns_disable()
648 percpu_ref_kill(&ns->ref); in nvmet_ns_disable()
650 wait_for_completion(&ns->disable_done); in nvmet_ns_disable()
651 percpu_ref_exit(&ns->ref); in nvmet_ns_disable()
653 mutex_lock(&subsys->lock); in nvmet_ns_disable()
655 subsys->nr_namespaces--; in nvmet_ns_disable()
656 nvmet_ns_changed(subsys, ns->nsid); in nvmet_ns_disable()
659 mutex_unlock(&subsys->lock); in nvmet_ns_disable()
667 nvmet_ana_group_enabled[ns->anagrpid]--; in nvmet_ns_free()
670 kfree(ns->device_path); in nvmet_ns_free()
682 init_completion(&ns->disable_done); in nvmet_ns_alloc()
684 ns->nsid = nsid; in nvmet_ns_alloc()
685 ns->subsys = subsys; in nvmet_ns_alloc()
688 ns->anagrpid = NVMET_DEFAULT_ANA_GRPID; in nvmet_ns_alloc()
689 nvmet_ana_group_enabled[ns->anagrpid]++; in nvmet_ns_alloc()
692 uuid_gen(&ns->uuid); in nvmet_ns_alloc()
693 ns->buffered_io = false; in nvmet_ns_alloc()
694 ns->csi = NVME_CSI_NVM; in nvmet_ns_alloc()
701 if (req->sq->size) { in nvmet_update_sq_head()
704 old_sqhd = READ_ONCE(req->sq->sqhd); in nvmet_update_sq_head()
706 new_sqhd = (old_sqhd + 1) % req->sq->size; in nvmet_update_sq_head()
707 } while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd)); in nvmet_update_sq_head()
709 req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF); in nvmet_update_sq_head()
714 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_set_error()
718 req->cqe->status = cpu_to_le16(status << 1); in nvmet_set_error()
720 if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC) in nvmet_set_error()
723 spin_lock_irqsave(&ctrl->error_lock, flags); in nvmet_set_error()
724 ctrl->err_counter++; in nvmet_set_error()
726 &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS]; in nvmet_set_error()
728 new_error_slot->error_count = cpu_to_le64(ctrl->err_counter); in nvmet_set_error()
729 new_error_slot->sqid = cpu_to_le16(req->sq->qid); in nvmet_set_error()
730 new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id); in nvmet_set_error()
731 new_error_slot->status_field = cpu_to_le16(status << 1); in nvmet_set_error()
732 new_error_slot->param_error_location = cpu_to_le16(req->error_loc); in nvmet_set_error()
733 new_error_slot->lba = cpu_to_le64(req->error_slba); in nvmet_set_error()
734 new_error_slot->nsid = req->cmd->common.nsid; in nvmet_set_error()
735 spin_unlock_irqrestore(&ctrl->error_lock, flags); in nvmet_set_error()
738 req->cqe->status |= cpu_to_le16(1 << 14); in nvmet_set_error()
743 struct nvmet_ns *ns = req->ns; in __nvmet_req_complete()
745 if (!req->sq->sqhd_disabled) in __nvmet_req_complete()
747 req->cqe->sq_id = cpu_to_le16(req->sq->qid); in __nvmet_req_complete()
748 req->cqe->command_id = req->cmd->common.command_id; in __nvmet_req_complete()
755 req->ops->queue_response(req); in __nvmet_req_complete()
762 struct nvmet_sq *sq = req->sq; in nvmet_req_complete()
765 percpu_ref_put(&sq->ref); in nvmet_req_complete()
772 cq->qid = qid; in nvmet_cq_setup()
773 cq->size = size; in nvmet_cq_setup()
779 sq->sqhd = 0; in nvmet_sq_setup()
780 sq->qid = qid; in nvmet_sq_setup()
781 sq->size = size; in nvmet_sq_setup()
783 ctrl->sqs[qid] = sq; in nvmet_sq_setup()
790 complete(&sq->confirm_done); in nvmet_confirm_sq()
795 struct nvmet_ctrl *ctrl = sq->ctrl; in nvmet_sq_destroy()
801 if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) in nvmet_sq_destroy()
803 percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq); in nvmet_sq_destroy()
804 wait_for_completion(&sq->confirm_done); in nvmet_sq_destroy()
805 wait_for_completion(&sq->free_done); in nvmet_sq_destroy()
806 percpu_ref_exit(&sq->ref); in nvmet_sq_destroy()
812 * store sq->ctrl locally, but before we killed the percpu_ref. the in nvmet_sq_destroy()
813 * admin connect allocates and assigns sq->ctrl, which now needs a in nvmet_sq_destroy()
816 ctrl = sq->ctrl; in nvmet_sq_destroy()
821 * send us keep-alive during this period, hence reset the in nvmet_sq_destroy()
822 * traffic based keep-alive timer so we don't trigger a in nvmet_sq_destroy()
823 * controller teardown as a result of a keep-alive expiration. in nvmet_sq_destroy()
825 ctrl->reset_tbkas = true; in nvmet_sq_destroy()
826 sq->ctrl->sqs[sq->qid] = NULL; in nvmet_sq_destroy()
828 sq->ctrl = NULL; /* allows reusing the queue later */ in nvmet_sq_destroy()
837 complete(&sq->free_done); in nvmet_sq_free()
844 ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL); in nvmet_sq_init()
849 init_completion(&sq->free_done); in nvmet_sq_init()
850 init_completion(&sq->confirm_done); in nvmet_sq_init()
860 enum nvme_ana_state state = port->ana_state[ns->anagrpid]; in nvmet_check_ana_state()
873 if (unlikely(req->ns->readonly)) { in nvmet_io_cmd_check_access()
874 switch (req->cmd->common.opcode) { in nvmet_io_cmd_check_access()
888 struct nvme_command *cmd = req->cmd; in nvmet_parse_io_cmd()
908 ret = nvmet_check_ana_state(req->port, req->ns); in nvmet_parse_io_cmd()
910 req->error_loc = offsetof(struct nvme_common_command, nsid); in nvmet_parse_io_cmd()
915 req->error_loc = offsetof(struct nvme_common_command, nsid); in nvmet_parse_io_cmd()
919 switch (req->ns->csi) { in nvmet_parse_io_cmd()
921 if (req->ns->file) in nvmet_parse_io_cmd()
936 u8 flags = req->cmd->common.flags; in nvmet_req_init()
939 req->cq = cq; in nvmet_req_init()
940 req->sq = sq; in nvmet_req_init()
941 req->ops = ops; in nvmet_req_init()
942 req->sg = NULL; in nvmet_req_init()
943 req->metadata_sg = NULL; in nvmet_req_init()
944 req->sg_cnt = 0; in nvmet_req_init()
945 req->metadata_sg_cnt = 0; in nvmet_req_init()
946 req->transfer_len = 0; in nvmet_req_init()
947 req->metadata_len = 0; in nvmet_req_init()
948 req->cqe->result.u64 = 0; in nvmet_req_init()
949 req->cqe->status = 0; in nvmet_req_init()
950 req->cqe->sq_head = 0; in nvmet_req_init()
951 req->ns = NULL; in nvmet_req_init()
952 req->error_loc = NVMET_NO_ERROR_LOC; in nvmet_req_init()
953 req->error_slba = 0; in nvmet_req_init()
957 req->error_loc = offsetof(struct nvme_common_command, flags); in nvmet_req_init()
968 req->error_loc = offsetof(struct nvme_common_command, flags); in nvmet_req_init()
973 if (unlikely(!req->sq->ctrl)) in nvmet_req_init()
974 /* will return an error for any non-connect command: */ in nvmet_req_init()
976 else if (likely(req->sq->qid != 0)) in nvmet_req_init()
984 trace_nvmet_req_init(req, req->cmd); in nvmet_req_init()
986 if (unlikely(!percpu_ref_tryget_live(&sq->ref))) { in nvmet_req_init()
991 if (sq->ctrl) in nvmet_req_init()
992 sq->ctrl->reset_tbkas = true; in nvmet_req_init()
1004 percpu_ref_put(&req->sq->ref); in nvmet_req_uninit()
1005 if (req->ns) in nvmet_req_uninit()
1006 nvmet_put_namespace(req->ns); in nvmet_req_uninit()
1012 if (unlikely(len != req->transfer_len)) { in nvmet_check_transfer_len()
1013 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_check_transfer_len()
1024 if (unlikely(data_len > req->transfer_len)) { in nvmet_check_data_len_lte()
1025 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_check_data_len_lte()
1035 return req->transfer_len - req->metadata_len; in nvmet_data_transfer_len()
1041 req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt, in nvmet_req_alloc_p2pmem_sgls()
1043 if (!req->sg) in nvmet_req_alloc_p2pmem_sgls()
1046 if (req->metadata_len) { in nvmet_req_alloc_p2pmem_sgls()
1047 req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev, in nvmet_req_alloc_p2pmem_sgls()
1048 &req->metadata_sg_cnt, req->metadata_len); in nvmet_req_alloc_p2pmem_sgls()
1049 if (!req->metadata_sg) in nvmet_req_alloc_p2pmem_sgls()
1053 req->p2p_dev = p2p_dev; in nvmet_req_alloc_p2pmem_sgls()
1057 pci_p2pmem_free_sgl(req->p2p_dev, req->sg); in nvmet_req_alloc_p2pmem_sgls()
1059 return -ENOMEM; in nvmet_req_alloc_p2pmem_sgls()
1065 !req->sq->ctrl || !req->sq->qid || !req->ns) in nvmet_req_find_p2p_dev()
1067 return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid); in nvmet_req_find_p2p_dev()
1077 req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL, in nvmet_req_alloc_sgls()
1078 &req->sg_cnt); in nvmet_req_alloc_sgls()
1079 if (unlikely(!req->sg)) in nvmet_req_alloc_sgls()
1082 if (req->metadata_len) { in nvmet_req_alloc_sgls()
1083 req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL, in nvmet_req_alloc_sgls()
1084 &req->metadata_sg_cnt); in nvmet_req_alloc_sgls()
1085 if (unlikely(!req->metadata_sg)) in nvmet_req_alloc_sgls()
1091 sgl_free(req->sg); in nvmet_req_alloc_sgls()
1093 return -ENOMEM; in nvmet_req_alloc_sgls()
1099 if (req->p2p_dev) { in nvmet_req_free_sgls()
1100 pci_p2pmem_free_sgl(req->p2p_dev, req->sg); in nvmet_req_free_sgls()
1101 if (req->metadata_sg) in nvmet_req_free_sgls()
1102 pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg); in nvmet_req_free_sgls()
1103 req->p2p_dev = NULL; in nvmet_req_free_sgls()
1105 sgl_free(req->sg); in nvmet_req_free_sgls()
1106 if (req->metadata_sg) in nvmet_req_free_sgls()
1107 sgl_free(req->metadata_sg); in nvmet_req_free_sgls()
1110 req->sg = NULL; in nvmet_req_free_sgls()
1111 req->metadata_sg = NULL; in nvmet_req_free_sgls()
1112 req->sg_cnt = 0; in nvmet_req_free_sgls()
1113 req->metadata_sg_cnt = 0; in nvmet_req_free_sgls()
1165 lockdep_assert_held(&ctrl->lock); in nvmet_start_ctrl()
1173 if (!nvmet_is_disc_subsys(ctrl->subsys) && in nvmet_start_ctrl()
1174 (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || in nvmet_start_ctrl()
1175 nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) { in nvmet_start_ctrl()
1176 ctrl->csts = NVME_CSTS_CFS; in nvmet_start_ctrl()
1180 if (nvmet_cc_mps(ctrl->cc) != 0 || in nvmet_start_ctrl()
1181 nvmet_cc_ams(ctrl->cc) != 0 || in nvmet_start_ctrl()
1182 !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) { in nvmet_start_ctrl()
1183 ctrl->csts = NVME_CSTS_CFS; in nvmet_start_ctrl()
1187 ctrl->csts = NVME_CSTS_RDY; in nvmet_start_ctrl()
1195 if (ctrl->kato) in nvmet_start_ctrl()
1196 mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_start_ctrl()
1201 lockdep_assert_held(&ctrl->lock); in nvmet_clear_ctrl()
1204 ctrl->csts &= ~NVME_CSTS_RDY; in nvmet_clear_ctrl()
1205 ctrl->cc = 0; in nvmet_clear_ctrl()
1212 mutex_lock(&ctrl->lock); in nvmet_update_cc()
1213 old = ctrl->cc; in nvmet_update_cc()
1214 ctrl->cc = new; in nvmet_update_cc()
1222 ctrl->csts |= NVME_CSTS_SHST_CMPLT; in nvmet_update_cc()
1225 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; in nvmet_update_cc()
1226 mutex_unlock(&ctrl->lock); in nvmet_update_cc()
1232 ctrl->cap = (1ULL << 37); in nvmet_init_cap()
1233 /* Controller supports one or more I/O Command Sets */ in nvmet_init_cap()
1234 ctrl->cap |= (1ULL << 43); in nvmet_init_cap()
1236 ctrl->cap |= (15ULL << 24); in nvmet_init_cap()
1238 if (ctrl->ops->get_max_queue_size) in nvmet_init_cap()
1239 ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1; in nvmet_init_cap()
1241 ctrl->cap |= NVMET_QUEUE_SIZE - 1; in nvmet_init_cap()
1243 if (nvmet_is_passthru_subsys(ctrl->subsys)) in nvmet_init_cap()
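The bits set in nvmet_init_cap() above land in fields of the NVMe Controller Capabilities (CAP) register. A hedged, standalone decode of that layout, with field positions taken from the NVMe base specification; the 1024-entry queue size below is only an illustrative value, not necessarily the driver's NVMET_QUEUE_SIZE:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cap = 0;

	cap |= 1ULL << 37;	/* CAP.CSS bit 0: NVM command set supported */
	cap |= 1ULL << 43;	/* CAP.CSS bit 6: one or more I/O command sets supported */
	cap |= 15ULL << 24;	/* CAP.TO: 15 * 500ms = 7.5s worst-case ready timeout */
	cap |= 1024 - 1;	/* CAP.MQES: max queue entries, zero's based (illustrative) */

	printf("CSS=0x%02x TO=%u MQES=%u\n",
	       (unsigned)((cap >> 37) & 0xff),
	       (unsigned)((cap >> 24) & 0xff),
	       (unsigned)(cap & 0xffff));
	return 0;
}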
1254 subsys = nvmet_find_get_subsys(req->port, subsysnqn); in nvmet_ctrl_find_get()
1258 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); in nvmet_ctrl_find_get()
1262 mutex_lock(&subsys->lock); in nvmet_ctrl_find_get()
1263 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_ctrl_find_get()
1264 if (ctrl->cntlid == cntlid) { in nvmet_ctrl_find_get()
1265 if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) { in nvmet_ctrl_find_get()
1269 if (!kref_get_unless_zero(&ctrl->ref)) in nvmet_ctrl_find_get()
1280 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); in nvmet_ctrl_find_get()
1283 mutex_unlock(&subsys->lock); in nvmet_ctrl_find_get()
1291 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { in nvmet_check_ctrl_status()
1293 req->cmd->common.opcode, req->sq->qid); in nvmet_check_ctrl_status()
1297 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { in nvmet_check_ctrl_status()
1299 req->cmd->common.opcode, req->sq->qid); in nvmet_check_ctrl_status()
1304 pr_warn("qid %d not authenticated\n", req->sq->qid); in nvmet_check_ctrl_status()
1316 if (subsys->allow_any_host) in nvmet_host_allowed()
1322 list_for_each_entry(p, &subsys->hosts, entry) { in nvmet_host_allowed()
1323 if (!strcmp(nvmet_host_name(p->host), hostnqn)) in nvmet_host_allowed()
1331 * Note: ctrl->subsys->lock should be held when calling this function
1339 if (!req->p2p_client) in nvmet_setup_p2p_ns_map()
1342 ctrl->p2p_client = get_device(req->p2p_client); in nvmet_setup_p2p_ns_map()
1344 xa_for_each(&ctrl->subsys->namespaces, idx, ns) in nvmet_setup_p2p_ns_map()
1349 * Note: ctrl->subsys->lock should be held when calling this function
1356 radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0) in nvmet_release_p2p_ns_map()
1359 put_device(ctrl->p2p_client); in nvmet_release_p2p_ns_map()
1367 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid); in nvmet_fatal_error_handler()
1368 ctrl->ops->delete_ctrl(ctrl); in nvmet_fatal_error_handler()
1380 subsys = nvmet_find_get_subsys(req->port, subsysnqn); in nvmet_alloc_ctrl()
1384 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); in nvmet_alloc_ctrl()
1385 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_alloc_ctrl()
1393 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn); in nvmet_alloc_ctrl()
1396 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_alloc_ctrl()
1405 mutex_init(&ctrl->lock); in nvmet_alloc_ctrl()
1407 ctrl->port = req->port; in nvmet_alloc_ctrl()
1408 ctrl->ops = req->ops; in nvmet_alloc_ctrl()
1412 if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP) in nvmet_alloc_ctrl()
1413 subsys->clear_ids = 1; in nvmet_alloc_ctrl()
1416 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); in nvmet_alloc_ctrl()
1417 INIT_LIST_HEAD(&ctrl->async_events); in nvmet_alloc_ctrl()
1418 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL); in nvmet_alloc_ctrl()
1419 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); in nvmet_alloc_ctrl()
1420 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); in nvmet_alloc_ctrl()
1422 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); in nvmet_alloc_ctrl()
1423 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); in nvmet_alloc_ctrl()
1425 kref_init(&ctrl->ref); in nvmet_alloc_ctrl()
1426 ctrl->subsys = subsys; in nvmet_alloc_ctrl()
1428 WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL); in nvmet_alloc_ctrl()
1430 ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES, in nvmet_alloc_ctrl()
1432 if (!ctrl->changed_ns_list) in nvmet_alloc_ctrl()
1435 ctrl->sqs = kcalloc(subsys->max_qid + 1, in nvmet_alloc_ctrl()
1438 if (!ctrl->sqs) in nvmet_alloc_ctrl()
1441 if (subsys->cntlid_min > subsys->cntlid_max) in nvmet_alloc_ctrl()
1445 subsys->cntlid_min, subsys->cntlid_max, in nvmet_alloc_ctrl()
1451 ctrl->cntlid = ret; in nvmet_alloc_ctrl()
1457 if (nvmet_is_disc_subsys(ctrl->subsys) && !kato) in nvmet_alloc_ctrl()
1460 /* keep-alive timeout in seconds */ in nvmet_alloc_ctrl()
1461 ctrl->kato = DIV_ROUND_UP(kato, 1000); in nvmet_alloc_ctrl()
1463 ctrl->err_counter = 0; in nvmet_alloc_ctrl()
1464 spin_lock_init(&ctrl->error_lock); in nvmet_alloc_ctrl()
1468 mutex_lock(&subsys->lock); in nvmet_alloc_ctrl()
1469 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); in nvmet_alloc_ctrl()
1471 mutex_unlock(&subsys->lock); in nvmet_alloc_ctrl()
1477 kfree(ctrl->sqs); in nvmet_alloc_ctrl()
1479 kfree(ctrl->changed_ns_list); in nvmet_alloc_ctrl()
1491 struct nvmet_subsys *subsys = ctrl->subsys; in nvmet_ctrl_free()
1493 mutex_lock(&subsys->lock); in nvmet_ctrl_free()
1495 list_del(&ctrl->subsys_entry); in nvmet_ctrl_free()
1496 mutex_unlock(&subsys->lock); in nvmet_ctrl_free()
1500 flush_work(&ctrl->async_event_work); in nvmet_ctrl_free()
1501 cancel_work_sync(&ctrl->fatal_err_work); in nvmet_ctrl_free()
1505 ida_free(&cntlid_ida, ctrl->cntlid); in nvmet_ctrl_free()
1508 kfree(ctrl->sqs); in nvmet_ctrl_free()
1509 kfree(ctrl->changed_ns_list); in nvmet_ctrl_free()
1517 kref_put(&ctrl->ref, nvmet_ctrl_free); in nvmet_ctrl_put()
1522 mutex_lock(&ctrl->lock); in nvmet_ctrl_fatal_error()
1523 if (!(ctrl->csts & NVME_CSTS_CFS)) { in nvmet_ctrl_fatal_error()
1524 ctrl->csts |= NVME_CSTS_CFS; in nvmet_ctrl_fatal_error()
1525 queue_work(nvmet_wq, &ctrl->fatal_err_work); in nvmet_ctrl_fatal_error()
1527 mutex_unlock(&ctrl->lock); in nvmet_ctrl_fatal_error()
1540 if (!kref_get_unless_zero(&nvmet_disc_subsys->ref)) in nvmet_find_get_subsys()
1546 list_for_each_entry(p, &port->subsystems, entry) { in nvmet_find_get_subsys()
1547 if (!strncmp(p->subsys->subsysnqn, subsysnqn, in nvmet_find_get_subsys()
1549 if (!kref_get_unless_zero(&p->subsys->ref)) in nvmet_find_get_subsys()
1552 return p->subsys; in nvmet_find_get_subsys()
1568 return ERR_PTR(-ENOMEM); in nvmet_subsys_alloc()
1570 subsys->ver = NVMET_DEFAULT_VS; in nvmet_subsys_alloc()
1573 bin2hex(subsys->serial, &serial, sizeof(serial)); in nvmet_subsys_alloc()
1575 subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL); in nvmet_subsys_alloc()
1576 if (!subsys->model_number) { in nvmet_subsys_alloc()
1577 ret = -ENOMEM; in nvmet_subsys_alloc()
1581 subsys->ieee_oui = 0; in nvmet_subsys_alloc()
1583 subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL); in nvmet_subsys_alloc()
1584 if (!subsys->firmware_rev) { in nvmet_subsys_alloc()
1585 ret = -ENOMEM; in nvmet_subsys_alloc()
1591 subsys->max_qid = NVMET_NR_QUEUES; in nvmet_subsys_alloc()
1595 subsys->max_qid = 0; in nvmet_subsys_alloc()
1598 pr_err("%s: Unknown Subsystem type - %d\n", __func__, type); in nvmet_subsys_alloc()
1599 ret = -EINVAL; in nvmet_subsys_alloc()
1602 subsys->type = type; in nvmet_subsys_alloc()
1603 subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE, in nvmet_subsys_alloc()
1605 if (!subsys->subsysnqn) { in nvmet_subsys_alloc()
1606 ret = -ENOMEM; in nvmet_subsys_alloc()
1609 subsys->cntlid_min = NVME_CNTLID_MIN; in nvmet_subsys_alloc()
1610 subsys->cntlid_max = NVME_CNTLID_MAX; in nvmet_subsys_alloc()
1611 kref_init(&subsys->ref); in nvmet_subsys_alloc()
1613 mutex_init(&subsys->lock); in nvmet_subsys_alloc()
1614 xa_init(&subsys->namespaces); in nvmet_subsys_alloc()
1615 INIT_LIST_HEAD(&subsys->ctrls); in nvmet_subsys_alloc()
1616 INIT_LIST_HEAD(&subsys->hosts); in nvmet_subsys_alloc()
1621 kfree(subsys->firmware_rev); in nvmet_subsys_alloc()
1623 kfree(subsys->model_number); in nvmet_subsys_alloc()
1634 WARN_ON_ONCE(!xa_empty(&subsys->namespaces)); in nvmet_subsys_free()
1636 xa_destroy(&subsys->namespaces); in nvmet_subsys_free()
1639 kfree(subsys->subsysnqn); in nvmet_subsys_free()
1640 kfree(subsys->model_number); in nvmet_subsys_free()
1641 kfree(subsys->firmware_rev); in nvmet_subsys_free()
1649 mutex_lock(&subsys->lock); in nvmet_subsys_del_ctrls()
1650 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_subsys_del_ctrls()
1651 ctrl->ops->delete_ctrl(ctrl); in nvmet_subsys_del_ctrls()
1652 mutex_unlock(&subsys->lock); in nvmet_subsys_del_ctrls()
1657 kref_put(&subsys->ref, nvmet_subsys_free); in nvmet_subsys_put()
1662 int error = -ENOMEM; in nvmet_init()
1666 nvmet_bvec_cache = kmem_cache_create("nvmet-bvec", in nvmet_init()
1670 return -ENOMEM; in nvmet_init()
1672 zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0); in nvmet_init()
1676 buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq", in nvmet_init()
1681 nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0); in nvmet_init()