xref: /openbmc/linux/drivers/nvme/target/core.c (revision cbafa54a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common code for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/random.h>
9 #include <linux/rculist.h>
10 #include <linux/pci-p2pdma.h>
11 #include <linux/scatterlist.h>
12 
13 #define CREATE_TRACE_POINTS
14 #include "trace.h"
15 
16 #include "nvmet.h"
17 
18 struct workqueue_struct *buffered_io_wq;
19 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
20 static DEFINE_IDA(cntlid_ida);
21 
22 /*
23  * This read/write semaphore is used to synchronize access to configuration
24  * information on a target system that will result in discovery log page
25  * information change for at least one host.
26  * The full list of resources protected by this semaphore is:
27  *
28  *  - subsystems list
29  *  - per-subsystem allowed hosts list
30  *  - allow_any_host subsystem attribute
31  *  - nvmet_genctr
32  *  - the nvmet_transports array
33  *
34  * When updating any of those lists/structures, the write lock should be
35  * obtained, while reading (populating the discovery log page or checking a
36  * host-subsystem link) takes the read lock to allow concurrent readers.
37  */
38 DECLARE_RWSEM(nvmet_config_sem);
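/*
 * Illustrative locking pattern only (the real call sites live in the
 * configfs and discovery code):
 *
 *	down_write(&nvmet_config_sem);
 *	...modify subsystems list, allowed hosts, nvmet_genctr...
 *	up_write(&nvmet_config_sem);
 *
 *	down_read(&nvmet_config_sem);
 *	...populate the discovery log page...
 *	up_read(&nvmet_config_sem);
 */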
39 
40 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
41 u64 nvmet_ana_chgcnt;
42 DECLARE_RWSEM(nvmet_ana_sem);
43 
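/*
 * Translate an errno returned by a backend into an NVMe status code and
 * record the byte offset of the offending command field in req->error_loc.
 * Unrecognized errnos map to NVME_SC_INTERNAL | NVME_SC_DNR.
 */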
44 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
45 {
46 	u16 status;
47 
48 	switch (errno) {
49 	case 0:
50 		status = NVME_SC_SUCCESS;
51 		break;
52 	case -ENOSPC:
53 		req->error_loc = offsetof(struct nvme_rw_command, length);
54 		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
55 		break;
56 	case -EREMOTEIO:
57 		req->error_loc = offsetof(struct nvme_rw_command, slba);
58 		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
59 		break;
60 	case -EOPNOTSUPP:
61 		req->error_loc = offsetof(struct nvme_common_command, opcode);
62 		switch (req->cmd->common.opcode) {
63 		case nvme_cmd_dsm:
64 		case nvme_cmd_write_zeroes:
65 			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
66 			break;
67 		default:
68 			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
69 		}
70 		break;
71 	case -ENODATA:
72 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
73 		status = NVME_SC_ACCESS_DENIED;
74 		break;
75 	case -EIO:
76 		fallthrough;
77 	default:
78 		req->error_loc = offsetof(struct nvme_common_command, opcode);
79 		status = NVME_SC_INTERNAL | NVME_SC_DNR;
80 	}
81 
82 	return status;
83 }
84 
85 u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
86 {
87 	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
88 		 req->sq->qid);
89 
90 	req->error_loc = offsetof(struct nvme_common_command, opcode);
91 	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
92 }
93 
94 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
95 		const char *subsysnqn);
96 
97 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
98 		size_t len)
99 {
100 	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
101 		req->error_loc = offsetof(struct nvme_common_command, dptr);
102 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
103 	}
104 	return 0;
105 }
106 
107 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
108 {
109 	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
110 		req->error_loc = offsetof(struct nvme_common_command, dptr);
111 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
112 	}
113 	return 0;
114 }
115 
116 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
117 {
118 	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
119 		req->error_loc = offsetof(struct nvme_common_command, dptr);
120 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
121 	}
122 	return 0;
123 }
124 
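/*
 * Return the highest NSID currently present in the subsystem.  xa_for_each()
 * iterates the namespaces xarray in ascending index order, so the last entry
 * visited carries the largest NSID.
 */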
125 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
126 {
127 	unsigned long nsid = 0;
128 	struct nvmet_ns *cur;
129 	unsigned long idx;
130 
131 	xa_for_each(&subsys->namespaces, idx, cur)
132 		nsid = cur->nsid;
133 
134 	return nsid;
135 }
136 
137 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
138 {
139 	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
140 }
141 
142 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
143 {
144 	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
145 	struct nvmet_req *req;
146 
147 	mutex_lock(&ctrl->lock);
148 	while (ctrl->nr_async_event_cmds) {
149 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
150 		mutex_unlock(&ctrl->lock);
151 		nvmet_req_complete(req, status);
152 		mutex_lock(&ctrl->lock);
153 	}
154 	mutex_unlock(&ctrl->lock);
155 }
156 
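/*
 * Deliver queued async events by completing outstanding AER commands, oldest
 * event first.  ctrl->lock is dropped around nvmet_req_complete() so the
 * transport's completion path does not run under the lock.
 */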
157 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
158 {
159 	struct nvmet_async_event *aen;
160 	struct nvmet_req *req;
161 
162 	mutex_lock(&ctrl->lock);
163 	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
164 		aen = list_first_entry(&ctrl->async_events,
165 				       struct nvmet_async_event, entry);
166 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
167 		nvmet_set_result(req, nvmet_async_event_result(aen));
168 
169 		list_del(&aen->entry);
170 		kfree(aen);
171 
172 		mutex_unlock(&ctrl->lock);
173 		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
174 		nvmet_req_complete(req, 0);
175 		mutex_lock(&ctrl->lock);
176 	}
177 	mutex_unlock(&ctrl->lock);
178 }
179 
180 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
181 {
182 	struct nvmet_async_event *aen, *tmp;
183 
184 	mutex_lock(&ctrl->lock);
185 	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
186 		list_del(&aen->entry);
187 		kfree(aen);
188 	}
189 	mutex_unlock(&ctrl->lock);
190 }
191 
192 static void nvmet_async_event_work(struct work_struct *work)
193 {
194 	struct nvmet_ctrl *ctrl =
195 		container_of(work, struct nvmet_ctrl, async_event_work);
196 
197 	nvmet_async_events_process(ctrl);
198 }
199 
200 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
201 		u8 event_info, u8 log_page)
202 {
203 	struct nvmet_async_event *aen;
204 
205 	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
206 	if (!aen)
207 		return;
208 
209 	aen->event_type = event_type;
210 	aen->event_info = event_info;
211 	aen->log_page = log_page;
212 
213 	mutex_lock(&ctrl->lock);
214 	list_add_tail(&aen->entry, &ctrl->async_events);
215 	mutex_unlock(&ctrl->lock);
216 
217 	schedule_work(&ctrl->async_event_work);
218 }
219 
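/*
 * Record @nsid in the controller's Changed Namespace List log.  Once more
 * than NVME_MAX_CHANGED_NAMESPACES entries would be needed, the list is
 * collapsed to the single entry 0xffffffff as required by the spec, and
 * nr_changed_ns is pinned to U32_MAX so further calls become no-ops.
 */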
220 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
221 {
222 	u32 i;
223 
224 	mutex_lock(&ctrl->lock);
225 	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
226 		goto out_unlock;
227 
228 	for (i = 0; i < ctrl->nr_changed_ns; i++) {
229 		if (ctrl->changed_ns_list[i] == nsid)
230 			goto out_unlock;
231 	}
232 
233 	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
234 		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
235 		ctrl->nr_changed_ns = U32_MAX;
236 		goto out_unlock;
237 	}
238 
239 	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
240 out_unlock:
241 	mutex_unlock(&ctrl->lock);
242 }
243 
244 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
245 {
246 	struct nvmet_ctrl *ctrl;
247 
248 	lockdep_assert_held(&subsys->lock);
249 
250 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
251 		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
252 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
253 			continue;
254 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
255 				NVME_AER_NOTICE_NS_CHANGED,
256 				NVME_LOG_CHANGED_NS);
257 	}
258 }
259 
260 void nvmet_send_ana_event(struct nvmet_subsys *subsys,
261 		struct nvmet_port *port)
262 {
263 	struct nvmet_ctrl *ctrl;
264 
265 	mutex_lock(&subsys->lock);
266 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
267 		if (port && ctrl->port != port)
268 			continue;
269 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
270 			continue;
271 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
272 				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
273 	}
274 	mutex_unlock(&subsys->lock);
275 }
276 
277 void nvmet_port_send_ana_event(struct nvmet_port *port)
278 {
279 	struct nvmet_subsys_link *p;
280 
281 	down_read(&nvmet_config_sem);
282 	list_for_each_entry(p, &port->subsystems, entry)
283 		nvmet_send_ana_event(p->subsys, port);
284 	up_read(&nvmet_config_sem);
285 }
286 
287 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
288 {
289 	int ret = 0;
290 
291 	down_write(&nvmet_config_sem);
292 	if (nvmet_transports[ops->type])
293 		ret = -EINVAL;
294 	else
295 		nvmet_transports[ops->type] = ops;
296 	up_write(&nvmet_config_sem);
297 
298 	return ret;
299 }
300 EXPORT_SYMBOL_GPL(nvmet_register_transport);
301 
302 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
303 {
304 	down_write(&nvmet_config_sem);
305 	nvmet_transports[ops->type] = NULL;
306 	up_write(&nvmet_config_sem);
307 }
308 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
309 
310 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
311 {
312 	struct nvmet_ctrl *ctrl;
313 
314 	mutex_lock(&subsys->lock);
315 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
316 		if (ctrl->port == port)
317 			ctrl->ops->delete_ctrl(ctrl);
318 	}
319 	mutex_unlock(&subsys->lock);
320 }
321 
322 int nvmet_enable_port(struct nvmet_port *port)
323 {
324 	const struct nvmet_fabrics_ops *ops;
325 	int ret;
326 
327 	lockdep_assert_held(&nvmet_config_sem);
328 
329 	ops = nvmet_transports[port->disc_addr.trtype];
330 	if (!ops) {
331 		up_write(&nvmet_config_sem);
332 		request_module("nvmet-transport-%d", port->disc_addr.trtype);
333 		down_write(&nvmet_config_sem);
334 		ops = nvmet_transports[port->disc_addr.trtype];
335 		if (!ops) {
336 			pr_err("transport type %d not supported\n",
337 				port->disc_addr.trtype);
338 			return -EINVAL;
339 		}
340 	}
341 
342 	if (!try_module_get(ops->owner))
343 		return -EINVAL;
344 
345 	/*
346 	 * If the user requested PI support and the transport isn't pi capable,
347 	 * don't enable the port.
348 	 */
349 	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
350 		pr_err("T10-PI is not supported by transport type %d\n",
351 		       port->disc_addr.trtype);
352 		ret = -EINVAL;
353 		goto out_put;
354 	}
355 
356 	ret = ops->add_port(port);
357 	if (ret)
358 		goto out_put;
359 
360 	/* If the transport didn't set inline_data_size, then disable it. */
361 	if (port->inline_data_size < 0)
362 		port->inline_data_size = 0;
363 
364 	port->enabled = true;
365 	port->tr_ops = ops;
366 	return 0;
367 
368 out_put:
369 	module_put(ops->owner);
370 	return ret;
371 }
372 
373 void nvmet_disable_port(struct nvmet_port *port)
374 {
375 	const struct nvmet_fabrics_ops *ops;
376 
377 	lockdep_assert_held(&nvmet_config_sem);
378 
379 	port->enabled = false;
380 	port->tr_ops = NULL;
381 
382 	ops = nvmet_transports[port->disc_addr.trtype];
383 	ops->remove_port(port);
384 	module_put(ops->owner);
385 }
386 
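/*
 * Traffic based keep-alive: if any command was seen during the last KATO
 * interval simply rearm the timer, otherwise assume the host is gone and
 * raise a fatal error, which tears the controller down.
 */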
387 static void nvmet_keep_alive_timer(struct work_struct *work)
388 {
389 	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
390 			struct nvmet_ctrl, ka_work);
391 	bool cmd_seen = ctrl->cmd_seen;
392 
393 	ctrl->cmd_seen = false;
394 	if (cmd_seen) {
395 		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
396 			ctrl->cntlid);
397 		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
398 		return;
399 	}
400 
401 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
402 		ctrl->cntlid, ctrl->kato);
403 
404 	nvmet_ctrl_fatal_error(ctrl);
405 }
406 
407 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
408 {
409 	if (unlikely(ctrl->kato == 0))
410 		return;
411 
412 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
413 		ctrl->cntlid, ctrl->kato);
414 
415 	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
416 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
417 }
418 
419 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
420 {
421 	if (unlikely(ctrl->kato == 0))
422 		return;
423 
424 	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
425 
426 	cancel_delayed_work_sync(&ctrl->ka_work);
427 }
428 
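/*
 * Look up the namespace addressed by the command's NSID and take a percpu
 * reference on it.  The reference is dropped in __nvmet_req_complete().
 */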
429 u16 nvmet_req_find_ns(struct nvmet_req *req)
430 {
431 	u32 nsid = le32_to_cpu(req->cmd->common.nsid);
432 
433 	req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
434 	if (unlikely(!req->ns)) {
435 		req->error_loc = offsetof(struct nvme_common_command, nsid);
436 		return NVME_SC_INVALID_NS | NVME_SC_DNR;
437 	}
438 
439 	percpu_ref_get(&req->ns->ref);
440 	return NVME_SC_SUCCESS;
441 }
442 
443 static void nvmet_destroy_namespace(struct percpu_ref *ref)
444 {
445 	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
446 
447 	complete(&ns->disable_done);
448 }
449 
450 void nvmet_put_namespace(struct nvmet_ns *ns)
451 {
452 	percpu_ref_put(&ns->ref);
453 }
454 
455 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
456 {
457 	nvmet_bdev_ns_disable(ns);
458 	nvmet_file_ns_disable(ns);
459 }
460 
461 static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
462 {
463 	int ret;
464 	struct pci_dev *p2p_dev;
465 
466 	if (!ns->use_p2pmem)
467 		return 0;
468 
469 	if (!ns->bdev) {
470 		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
471 		return -EINVAL;
472 	}
473 
474 	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
475 		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
476 		       ns->device_path);
477 		return -EINVAL;
478 	}
479 
480 	if (ns->p2p_dev) {
481 		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
482 		if (ret < 0)
483 			return -EINVAL;
484 	} else {
485 		/*
486 		 * Right now we just check that there is p2pmem available so
487 		 * we can report an error to the user right away if there
488 		 * is not. We'll find the actual device to use once we
489 		 * set up the controller when the port's device is available.
490 		 */
491 
492 		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
493 		if (!p2p_dev) {
494 			pr_err("no peer-to-peer memory is available for %s\n",
495 			       ns->device_path);
496 			return -EINVAL;
497 		}
498 
499 		pci_dev_put(p2p_dev);
500 	}
501 
502 	return 0;
503 }
504 
505 /*
506  * Note: ctrl->subsys->lock should be held when calling this function
507  */
508 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
509 				    struct nvmet_ns *ns)
510 {
511 	struct device *clients[2];
512 	struct pci_dev *p2p_dev;
513 	int ret;
514 
515 	if (!ctrl->p2p_client || !ns->use_p2pmem)
516 		return;
517 
518 	if (ns->p2p_dev) {
519 		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
520 		if (ret < 0)
521 			return;
522 
523 		p2p_dev = pci_dev_get(ns->p2p_dev);
524 	} else {
525 		clients[0] = ctrl->p2p_client;
526 		clients[1] = nvmet_ns_dev(ns);
527 
528 		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
529 		if (!p2p_dev) {
530 			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
531 			       dev_name(ctrl->p2p_client), ns->device_path);
532 			return;
533 		}
534 	}
535 
536 	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
537 	if (ret < 0)
538 		pci_dev_put(p2p_dev);
539 
540 	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
541 		ns->nsid);
542 }
543 
544 void nvmet_ns_revalidate(struct nvmet_ns *ns)
545 {
546 	loff_t oldsize = ns->size;
547 
548 	if (ns->bdev)
549 		nvmet_bdev_ns_revalidate(ns);
550 	else
551 		nvmet_file_ns_revalidate(ns);
552 
553 	if (oldsize != ns->size)
554 		nvmet_ns_changed(ns->subsys, ns->nsid);
555 }
556 
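/*
 * Attach the backing block device or file for @ns, set up optional
 * peer-to-peer memory, and publish the namespace in the subsystem's xarray.
 * A Namespace Attribute Changed AEN is generated for all controllers on the
 * subsystem.
 */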
557 int nvmet_ns_enable(struct nvmet_ns *ns)
558 {
559 	struct nvmet_subsys *subsys = ns->subsys;
560 	struct nvmet_ctrl *ctrl;
561 	int ret;
562 
563 	mutex_lock(&subsys->lock);
564 	ret = 0;
565 
566 	if (nvmet_passthru_ctrl(subsys)) {
567 		pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
568 		goto out_unlock;
569 	}
570 
571 	if (ns->enabled)
572 		goto out_unlock;
573 
574 	ret = -EMFILE;
575 	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
576 		goto out_unlock;
577 
578 	ret = nvmet_bdev_ns_enable(ns);
579 	if (ret == -ENOTBLK)
580 		ret = nvmet_file_ns_enable(ns);
581 	if (ret)
582 		goto out_unlock;
583 
584 	ret = nvmet_p2pmem_ns_enable(ns);
585 	if (ret)
586 		goto out_dev_disable;
587 
588 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
589 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
590 
591 	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
592 				0, GFP_KERNEL);
593 	if (ret)
594 		goto out_dev_put;
595 
596 	if (ns->nsid > subsys->max_nsid)
597 		subsys->max_nsid = ns->nsid;
598 
599 	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
600 	if (ret)
601 		goto out_restore_subsys_maxnsid;
602 
603 	subsys->nr_namespaces++;
604 
605 	nvmet_ns_changed(subsys, ns->nsid);
606 	ns->enabled = true;
607 	ret = 0;
608 out_unlock:
609 	mutex_unlock(&subsys->lock);
610 	return ret;
611 
612 out_restore_subsys_maxnsid:
613 	subsys->max_nsid = nvmet_max_nsid(subsys);
614 	percpu_ref_exit(&ns->ref);
615 out_dev_put:
616 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
617 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
618 out_dev_disable:
619 	nvmet_ns_dev_disable(ns);
620 	goto out_unlock;
621 }
622 
623 void nvmet_ns_disable(struct nvmet_ns *ns)
624 {
625 	struct nvmet_subsys *subsys = ns->subsys;
626 	struct nvmet_ctrl *ctrl;
627 
628 	mutex_lock(&subsys->lock);
629 	if (!ns->enabled)
630 		goto out_unlock;
631 
632 	ns->enabled = false;
633 	xa_erase(&ns->subsys->namespaces, ns->nsid);
634 	if (ns->nsid == subsys->max_nsid)
635 		subsys->max_nsid = nvmet_max_nsid(subsys);
636 
637 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
638 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
639 
640 	mutex_unlock(&subsys->lock);
641 
642 	/*
643 	 * Now that we removed the namespace from the lookup list, we
644 	 * can kill the percpu ref and wait for any remaining references
645 	 * to be dropped, as well as an RCU grace period for anyone only
646 	 * using the namespace under rcu_read_lock().  Note that we can't
647 	 * use call_rcu here as we need to ensure the namespaces have
648 	 * been fully destroyed before unloading the module.
649 	 */
650 	percpu_ref_kill(&ns->ref);
651 	synchronize_rcu();
652 	wait_for_completion(&ns->disable_done);
653 	percpu_ref_exit(&ns->ref);
654 
655 	mutex_lock(&subsys->lock);
656 
657 	subsys->nr_namespaces--;
658 	nvmet_ns_changed(subsys, ns->nsid);
659 	nvmet_ns_dev_disable(ns);
660 out_unlock:
661 	mutex_unlock(&subsys->lock);
662 }
663 
664 void nvmet_ns_free(struct nvmet_ns *ns)
665 {
666 	nvmet_ns_disable(ns);
667 
668 	down_write(&nvmet_ana_sem);
669 	nvmet_ana_group_enabled[ns->anagrpid]--;
670 	up_write(&nvmet_ana_sem);
671 
672 	kfree(ns->device_path);
673 	kfree(ns);
674 }
675 
676 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
677 {
678 	struct nvmet_ns *ns;
679 
680 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
681 	if (!ns)
682 		return NULL;
683 
684 	init_completion(&ns->disable_done);
685 
686 	ns->nsid = nsid;
687 	ns->subsys = subsys;
688 
689 	down_write(&nvmet_ana_sem);
690 	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
691 	nvmet_ana_group_enabled[ns->anagrpid]++;
692 	up_write(&nvmet_ana_sem);
693 
694 	uuid_gen(&ns->uuid);
695 	ns->buffered_io = false;
696 
697 	return ns;
698 }
699 
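/*
 * Advance the submission queue head with a cmpxchg() loop so that parallel
 * completions do not lose updates, and report the new value to the host in
 * the CQE.
 */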
700 static void nvmet_update_sq_head(struct nvmet_req *req)
701 {
702 	if (req->sq->size) {
703 		u32 old_sqhd, new_sqhd;
704 
705 		do {
706 			old_sqhd = req->sq->sqhd;
707 			new_sqhd = (old_sqhd + 1) % req->sq->size;
708 		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
709 					old_sqhd);
710 	}
711 	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
712 }
713 
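/*
 * Record a failed command in the controller's circular Error Information
 * log (NVMET_ERROR_LOG_SLOTS entries) and set the More bit in the CQE
 * status field so the host knows extended error information is available.
 */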
714 static void nvmet_set_error(struct nvmet_req *req, u16 status)
715 {
716 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
717 	struct nvme_error_slot *new_error_slot;
718 	unsigned long flags;
719 
720 	req->cqe->status = cpu_to_le16(status << 1);
721 
722 	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
723 		return;
724 
725 	spin_lock_irqsave(&ctrl->error_lock, flags);
726 	ctrl->err_counter++;
727 	new_error_slot =
728 		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
729 
730 	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
731 	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
732 	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
733 	new_error_slot->status_field = cpu_to_le16(status << 1);
734 	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
735 	new_error_slot->lba = cpu_to_le64(req->error_slba);
736 	new_error_slot->nsid = req->cmd->common.nsid;
737 	spin_unlock_irqrestore(&ctrl->error_lock, flags);
738 
739 	/* set the more bit for this request */
740 	req->cqe->status |= cpu_to_le16(1 << 14);
741 }
742 
743 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
744 {
745 	if (!req->sq->sqhd_disabled)
746 		nvmet_update_sq_head(req);
747 	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
748 	req->cqe->command_id = req->cmd->common.command_id;
749 
750 	if (unlikely(status))
751 		nvmet_set_error(req, status);
752 
753 	trace_nvmet_req_complete(req);
754 
755 	if (req->ns)
756 		nvmet_put_namespace(req->ns);
757 	req->ops->queue_response(req);
758 }
759 
760 void nvmet_req_complete(struct nvmet_req *req, u16 status)
761 {
762 	__nvmet_req_complete(req, status);
763 	percpu_ref_put(&req->sq->ref);
764 }
765 EXPORT_SYMBOL_GPL(nvmet_req_complete);
766 
767 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
768 		u16 qid, u16 size)
769 {
770 	cq->qid = qid;
771 	cq->size = size;
772 }
773 
774 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
775 		u16 qid, u16 size)
776 {
777 	sq->sqhd = 0;
778 	sq->qid = qid;
779 	sq->size = size;
780 
781 	ctrl->sqs[qid] = sq;
782 }
783 
784 static void nvmet_confirm_sq(struct percpu_ref *ref)
785 {
786 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
787 
788 	complete(&sq->confirm_done);
789 }
790 
791 void nvmet_sq_destroy(struct nvmet_sq *sq)
792 {
793 	struct nvmet_ctrl *ctrl = sq->ctrl;
794 
795 	/*
796 	 * If this is the admin queue, complete all AERs so that our
797 	 * queue doesn't have outstanding requests on it.
798 	 */
799 	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
800 		nvmet_async_events_failall(ctrl);
801 	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
802 	wait_for_completion(&sq->confirm_done);
803 	wait_for_completion(&sq->free_done);
804 	percpu_ref_exit(&sq->ref);
805 
806 	if (ctrl) {
807 		nvmet_ctrl_put(ctrl);
808 		sq->ctrl = NULL; /* allows reusing the queue later */
809 	}
810 }
811 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
812 
813 static void nvmet_sq_free(struct percpu_ref *ref)
814 {
815 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
816 
817 	complete(&sq->free_done);
818 }
819 
820 int nvmet_sq_init(struct nvmet_sq *sq)
821 {
822 	int ret;
823 
824 	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
825 	if (ret) {
826 		pr_err("percpu_ref init failed!\n");
827 		return ret;
828 	}
829 	init_completion(&sq->free_done);
830 	init_completion(&sq->confirm_done);
831 
832 	return 0;
833 }
834 EXPORT_SYMBOL_GPL(nvmet_sq_init);
835 
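/*
 * Map the ANA state of the namespace's ANA group on this port to a path
 * related status code; optimized and non-optimized states pass through as
 * success.
 */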
836 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
837 		struct nvmet_ns *ns)
838 {
839 	enum nvme_ana_state state = port->ana_state[ns->anagrpid];
840 
841 	if (unlikely(state == NVME_ANA_INACCESSIBLE))
842 		return NVME_SC_ANA_INACCESSIBLE;
843 	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
844 		return NVME_SC_ANA_PERSISTENT_LOSS;
845 	if (unlikely(state == NVME_ANA_CHANGE))
846 		return NVME_SC_ANA_TRANSITION;
847 	return 0;
848 }
849 
850 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
851 {
852 	if (unlikely(req->ns->readonly)) {
853 		switch (req->cmd->common.opcode) {
854 		case nvme_cmd_read:
855 		case nvme_cmd_flush:
856 			break;
857 		default:
858 			return NVME_SC_NS_WRITE_PROTECTED;
859 		}
860 	}
861 
862 	return 0;
863 }
864 
865 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
866 {
867 	u16 ret;
868 
869 	ret = nvmet_check_ctrl_status(req);
870 	if (unlikely(ret))
871 		return ret;
872 
873 	if (nvmet_req_passthru_ctrl(req))
874 		return nvmet_parse_passthru_io_cmd(req);
875 
876 	ret = nvmet_req_find_ns(req);
877 	if (unlikely(ret))
878 		return ret;
879 
880 	ret = nvmet_check_ana_state(req->port, req->ns);
881 	if (unlikely(ret)) {
882 		req->error_loc = offsetof(struct nvme_common_command, nsid);
883 		return ret;
884 	}
885 	ret = nvmet_io_cmd_check_access(req);
886 	if (unlikely(ret)) {
887 		req->error_loc = offsetof(struct nvme_common_command, nsid);
888 		return ret;
889 	}
890 
891 	if (req->ns->file)
892 		return nvmet_file_parse_io_cmd(req);
893 
894 	return nvmet_bdev_parse_io_cmd(req);
895 }
896 
897 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
898 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
899 {
900 	u8 flags = req->cmd->common.flags;
901 	u16 status;
902 
903 	req->cq = cq;
904 	req->sq = sq;
905 	req->ops = ops;
906 	req->sg = NULL;
907 	req->metadata_sg = NULL;
908 	req->sg_cnt = 0;
909 	req->metadata_sg_cnt = 0;
910 	req->transfer_len = 0;
911 	req->metadata_len = 0;
912 	req->cqe->status = 0;
913 	req->cqe->sq_head = 0;
914 	req->ns = NULL;
915 	req->error_loc = NVMET_NO_ERROR_LOC;
916 	req->error_slba = 0;
917 
918 	/* no support for fused commands yet */
919 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
920 		req->error_loc = offsetof(struct nvme_common_command, flags);
921 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
922 		goto fail;
923 	}
924 
925 	/*
926 	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
927 	 * contains an address of a single contiguous physical buffer that is
928 	 * byte aligned.
929 	 */
930 	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
931 		req->error_loc = offsetof(struct nvme_common_command, flags);
932 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
933 		goto fail;
934 	}
935 
936 	if (unlikely(!req->sq->ctrl))
937 		/* will return an error for any non-connect command: */
938 		status = nvmet_parse_connect_cmd(req);
939 	else if (likely(req->sq->qid != 0))
940 		status = nvmet_parse_io_cmd(req);
941 	else
942 		status = nvmet_parse_admin_cmd(req);
943 
944 	if (status)
945 		goto fail;
946 
947 	trace_nvmet_req_init(req, req->cmd);
948 
949 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
950 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
951 		goto fail;
952 	}
953 
954 	if (sq->ctrl)
955 		sq->ctrl->cmd_seen = true;
956 
957 	return true;
958 
959 fail:
960 	__nvmet_req_complete(req, status);
961 	return false;
962 }
963 EXPORT_SYMBOL_GPL(nvmet_req_init);
964 
965 void nvmet_req_uninit(struct nvmet_req *req)
966 {
967 	percpu_ref_put(&req->sq->ref);
968 	if (req->ns)
969 		nvmet_put_namespace(req->ns);
970 }
971 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
972 
973 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
974 {
975 	if (unlikely(len != req->transfer_len)) {
976 		req->error_loc = offsetof(struct nvme_common_command, dptr);
977 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
978 		return false;
979 	}
980 
981 	return true;
982 }
983 EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
984 
985 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
986 {
987 	if (unlikely(data_len > req->transfer_len)) {
988 		req->error_loc = offsetof(struct nvme_common_command, dptr);
989 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
990 		return false;
991 	}
992 
993 	return true;
994 }
995 
996 static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
997 {
998 	return req->transfer_len - req->metadata_len;
999 }
1000 
1001 static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
1002 {
1003 	req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
1004 			nvmet_data_transfer_len(req));
1005 	if (!req->sg)
1006 		goto out_err;
1007 
1008 	if (req->metadata_len) {
1009 		req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
1010 				&req->metadata_sg_cnt, req->metadata_len);
1011 		if (!req->metadata_sg)
1012 			goto out_free_sg;
1013 	}
1014 	return 0;
1015 out_free_sg:
1016 	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1017 out_err:
1018 	return -ENOMEM;
1019 }
1020 
1021 static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
1022 {
1023 	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
1024 		return false;
1025 
1026 	if (req->sq->ctrl && req->sq->qid && req->ns) {
1027 		req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
1028 						 req->ns->nsid);
1029 		if (req->p2p_dev)
1030 			return true;
1031 	}
1032 
1033 	req->p2p_dev = NULL;
1034 	return false;
1035 }
1036 
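/*
 * Allocate the data (and, if needed, metadata) scatterlists for a request,
 * preferring peer-to-peer memory when a P2P device was matched for this
 * namespace and queue, and falling back to normal memory otherwise.
 */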
1037 int nvmet_req_alloc_sgls(struct nvmet_req *req)
1038 {
1039 	if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
1040 		return 0;
1041 
1042 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
1043 			    &req->sg_cnt);
1044 	if (unlikely(!req->sg))
1045 		goto out;
1046 
1047 	if (req->metadata_len) {
1048 		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
1049 					     &req->metadata_sg_cnt);
1050 		if (unlikely(!req->metadata_sg))
1051 			goto out_free;
1052 	}
1053 
1054 	return 0;
1055 out_free:
1056 	sgl_free(req->sg);
1057 out:
1058 	return -ENOMEM;
1059 }
1060 EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
1061 
1062 void nvmet_req_free_sgls(struct nvmet_req *req)
1063 {
1064 	if (req->p2p_dev) {
1065 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1066 		if (req->metadata_sg)
1067 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
1068 	} else {
1069 		sgl_free(req->sg);
1070 		if (req->metadata_sg)
1071 			sgl_free(req->metadata_sg);
1072 	}
1073 
1074 	req->sg = NULL;
1075 	req->metadata_sg = NULL;
1076 	req->sg_cnt = 0;
1077 	req->metadata_sg_cnt = 0;
1078 }
1079 EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
1080 
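/* Field extraction helpers for the CC (Controller Configuration) register. */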
1081 static inline bool nvmet_cc_en(u32 cc)
1082 {
1083 	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
1084 }
1085 
1086 static inline u8 nvmet_cc_css(u32 cc)
1087 {
1088 	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
1089 }
1090 
1091 static inline u8 nvmet_cc_mps(u32 cc)
1092 {
1093 	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
1094 }
1095 
1096 static inline u8 nvmet_cc_ams(u32 cc)
1097 {
1098 	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
1099 }
1100 
1101 static inline u8 nvmet_cc_shn(u32 cc)
1102 {
1103 	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
1104 }
1105 
1106 static inline u8 nvmet_cc_iosqes(u32 cc)
1107 {
1108 	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
1109 }
1110 
1111 static inline u8 nvmet_cc_iocqes(u32 cc)
1112 {
1113 	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
1114 }
1115 
1116 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1117 {
1118 	lockdep_assert_held(&ctrl->lock);
1119 
1120 	/*
1121 	 * Only I/O controllers should verify iosqes,iocqes.
1122 	 * Strictly speaking, the spec says a discovery controller
1123 	 * should verify iosqes,iocqes are zeroed, however that
1124 	 * would break backwards compatibility, so don't enforce it.
1125 	 */
1126 	if (ctrl->subsys->type != NVME_NQN_DISC &&
1127 	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1128 	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
1129 		ctrl->csts = NVME_CSTS_CFS;
1130 		return;
1131 	}
1132 
1133 	if (nvmet_cc_mps(ctrl->cc) != 0 ||
1134 	    nvmet_cc_ams(ctrl->cc) != 0 ||
1135 	    nvmet_cc_css(ctrl->cc) != 0) {
1136 		ctrl->csts = NVME_CSTS_CFS;
1137 		return;
1138 	}
1139 
1140 	ctrl->csts = NVME_CSTS_RDY;
1141 
1142 	/*
1143 	 * Controllers that are not yet enabled should not really enforce the
1144 	 * keep alive timeout, but we still want to track a timeout and cleanup
1145 	 * in case a host died before it enabled the controller.  Hence, simply
1146 	 * reset the keep alive timer when the controller is enabled.
1147 	 */
1148 	if (ctrl->kato)
1149 		mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
1150 }
1151 
1152 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1153 {
1154 	lockdep_assert_held(&ctrl->lock);
1155 
1156 	/* XXX: tear down queues? */
1157 	ctrl->csts &= ~NVME_CSTS_RDY;
1158 	ctrl->cc = 0;
1159 }
1160 
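/*
 * Handle a host write to the CC property: a 0->1 transition of CC.EN starts
 * the controller, a 1->0 transition resets it, and setting CC.SHN performs a
 * shutdown and latches CSTS.SHST to "shutdown complete".
 */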
1161 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1162 {
1163 	u32 old;
1164 
1165 	mutex_lock(&ctrl->lock);
1166 	old = ctrl->cc;
1167 	ctrl->cc = new;
1168 
1169 	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1170 		nvmet_start_ctrl(ctrl);
1171 	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1172 		nvmet_clear_ctrl(ctrl);
1173 	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1174 		nvmet_clear_ctrl(ctrl);
1175 		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1176 	}
1177 	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1178 		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1179 	mutex_unlock(&ctrl->lock);
1180 }
1181 
1182 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1183 {
1184 	/* command sets supported: NVMe command set: */
1185 	ctrl->cap = (1ULL << 37);
1186 	/* CC.EN timeout in 500msec units: */
1187 	ctrl->cap |= (15ULL << 24);
1188 	/* maximum queue entries supported: */
1189 	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
1190 }
1191 
1192 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
1193 				       const char *hostnqn, u16 cntlid,
1194 				       struct nvmet_req *req)
1195 {
1196 	struct nvmet_ctrl *ctrl = NULL;
1197 	struct nvmet_subsys *subsys;
1198 
1199 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1200 	if (!subsys) {
1201 		pr_warn("connect request for invalid subsystem %s!\n",
1202 			subsysnqn);
1203 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1204 		goto out;
1205 	}
1206 
1207 	mutex_lock(&subsys->lock);
1208 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1209 		if (ctrl->cntlid == cntlid) {
1210 			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1211 				pr_warn("hostnqn mismatch.\n");
1212 				continue;
1213 			}
1214 			if (!kref_get_unless_zero(&ctrl->ref))
1215 				continue;
1216 
1217 			/* ctrl found */
1218 			goto found;
1219 		}
1220 	}
1221 
1222 	ctrl = NULL; /* ctrl not found */
1223 	pr_warn("could not find controller %d for subsys %s / host %s\n",
1224 		cntlid, subsysnqn, hostnqn);
1225 	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1226 
1227 found:
1228 	mutex_unlock(&subsys->lock);
1229 	nvmet_subsys_put(subsys);
1230 out:
1231 	return ctrl;
1232 }
1233 
1234 u16 nvmet_check_ctrl_status(struct nvmet_req *req)
1235 {
1236 	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1237 		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
1238 		       req->cmd->common.opcode, req->sq->qid);
1239 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1240 	}
1241 
1242 	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1243 		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
1244 		       req->cmd->common.opcode, req->sq->qid);
1245 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1246 	}
1247 	return 0;
1248 }
1249 
1250 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1251 {
1252 	struct nvmet_host_link *p;
1253 
1254 	lockdep_assert_held(&nvmet_config_sem);
1255 
1256 	if (subsys->allow_any_host)
1257 		return true;
1258 
1259 	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
1260 		return true;
1261 
1262 	list_for_each_entry(p, &subsys->hosts, entry) {
1263 		if (!strcmp(nvmet_host_name(p->host), hostnqn))
1264 			return true;
1265 	}
1266 
1267 	return false;
1268 }
1269 
1270 /*
1271  * Note: ctrl->subsys->lock should be held when calling this function
1272  */
1273 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1274 		struct nvmet_req *req)
1275 {
1276 	struct nvmet_ns *ns;
1277 	unsigned long idx;
1278 
1279 	if (!req->p2p_client)
1280 		return;
1281 
1282 	ctrl->p2p_client = get_device(req->p2p_client);
1283 
1284 	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
1285 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
1286 }
1287 
1288 /*
1289  * Note: ctrl->subsys->lock should be held when calling this function
1290  */
1291 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1292 {
1293 	struct radix_tree_iter iter;
1294 	void __rcu **slot;
1295 
1296 	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1297 		pci_dev_put(radix_tree_deref_slot(slot));
1298 
1299 	put_device(ctrl->p2p_client);
1300 }
1301 
1302 static void nvmet_fatal_error_handler(struct work_struct *work)
1303 {
1304 	struct nvmet_ctrl *ctrl =
1305 			container_of(work, struct nvmet_ctrl, fatal_err_work);
1306 
1307 	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1308 	ctrl->ops->delete_ctrl(ctrl);
1309 }
1310 
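/*
 * Create a controller in response to a Fabrics Connect command: validate the
 * subsystem NQN and host permissions, allocate per-controller state (AEN
 * machinery, SQ array, changed-namespace log), pick a CNTLID from the
 * subsystem's [cntlid_min, cntlid_max] range and start the keep-alive timer.
 */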
1311 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1312 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1313 {
1314 	struct nvmet_subsys *subsys;
1315 	struct nvmet_ctrl *ctrl;
1316 	int ret;
1317 	u16 status;
1318 
1319 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1320 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1321 	if (!subsys) {
1322 		pr_warn("connect request for invalid subsystem %s!\n",
1323 			subsysnqn);
1324 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1325 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1326 		goto out;
1327 	}
1328 
1329 	down_read(&nvmet_config_sem);
1330 	if (!nvmet_host_allowed(subsys, hostnqn)) {
1331 		pr_info("connect by host %s for subsystem %s not allowed\n",
1332 			hostnqn, subsysnqn);
1333 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1334 		up_read(&nvmet_config_sem);
1335 		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
1336 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1337 		goto out_put_subsystem;
1338 	}
1339 	up_read(&nvmet_config_sem);
1340 
1341 	status = NVME_SC_INTERNAL;
1342 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1343 	if (!ctrl)
1344 		goto out_put_subsystem;
1345 	mutex_init(&ctrl->lock);
1346 
1347 	nvmet_init_cap(ctrl);
1348 
1349 	ctrl->port = req->port;
1350 
1351 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1352 	INIT_LIST_HEAD(&ctrl->async_events);
1353 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1354 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1355 
1356 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1357 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1358 
1359 	kref_init(&ctrl->ref);
1360 	ctrl->subsys = subsys;
1361 	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1362 
1363 	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1364 			sizeof(__le32), GFP_KERNEL);
1365 	if (!ctrl->changed_ns_list)
1366 		goto out_free_ctrl;
1367 
1368 	ctrl->sqs = kcalloc(subsys->max_qid + 1,
1369 			sizeof(struct nvmet_sq *),
1370 			GFP_KERNEL);
1371 	if (!ctrl->sqs)
1372 		goto out_free_changed_ns_list;
1373 
1374 	if (subsys->cntlid_min > subsys->cntlid_max)
1375 		goto out_free_changed_ns_list;
1376 
1377 	ret = ida_simple_get(&cntlid_ida,
1378 			     subsys->cntlid_min, subsys->cntlid_max,
1379 			     GFP_KERNEL);
1380 	if (ret < 0) {
1381 		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
1382 		goto out_free_sqs;
1383 	}
1384 	ctrl->cntlid = ret;
1385 
1386 	ctrl->ops = req->ops;
1387 
1388 	/*
1389 	 * Discovery controllers may use some arbitrary high value
1390 	 * in order to clean up stale discovery sessions
1391 	 */
1392 	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
1393 		kato = NVMET_DISC_KATO_MS;
1394 
1395 	/* keep-alive timeout in seconds */
1396 	ctrl->kato = DIV_ROUND_UP(kato, 1000);
1397 
1398 	ctrl->err_counter = 0;
1399 	spin_lock_init(&ctrl->error_lock);
1400 
1401 	nvmet_start_keep_alive_timer(ctrl);
1402 
1403 	mutex_lock(&subsys->lock);
1404 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1405 	nvmet_setup_p2p_ns_map(ctrl, req);
1406 	mutex_unlock(&subsys->lock);
1407 
1408 	*ctrlp = ctrl;
1409 	return 0;
1410 
1411 out_free_sqs:
1412 	kfree(ctrl->sqs);
1413 out_free_changed_ns_list:
1414 	kfree(ctrl->changed_ns_list);
1415 out_free_ctrl:
1416 	kfree(ctrl);
1417 out_put_subsystem:
1418 	nvmet_subsys_put(subsys);
1419 out:
1420 	return status;
1421 }
1422 
1423 static void nvmet_ctrl_free(struct kref *ref)
1424 {
1425 	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1426 	struct nvmet_subsys *subsys = ctrl->subsys;
1427 
1428 	mutex_lock(&subsys->lock);
1429 	nvmet_release_p2p_ns_map(ctrl);
1430 	list_del(&ctrl->subsys_entry);
1431 	mutex_unlock(&subsys->lock);
1432 
1433 	nvmet_stop_keep_alive_timer(ctrl);
1434 
1435 	flush_work(&ctrl->async_event_work);
1436 	cancel_work_sync(&ctrl->fatal_err_work);
1437 
1438 	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
1439 
1440 	nvmet_async_events_free(ctrl);
1441 	kfree(ctrl->sqs);
1442 	kfree(ctrl->changed_ns_list);
1443 	kfree(ctrl);
1444 
1445 	nvmet_subsys_put(subsys);
1446 }
1447 
1448 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1449 {
1450 	kref_put(&ctrl->ref, nvmet_ctrl_free);
1451 }
1452 
1453 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1454 {
1455 	mutex_lock(&ctrl->lock);
1456 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
1457 		ctrl->csts |= NVME_CSTS_CFS;
1458 		schedule_work(&ctrl->fatal_err_work);
1459 	}
1460 	mutex_unlock(&ctrl->lock);
1461 }
1462 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1463 
1464 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1465 		const char *subsysnqn)
1466 {
1467 	struct nvmet_subsys_link *p;
1468 
1469 	if (!port)
1470 		return NULL;
1471 
1472 	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1473 		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1474 			return NULL;
1475 		return nvmet_disc_subsys;
1476 	}
1477 
1478 	down_read(&nvmet_config_sem);
1479 	list_for_each_entry(p, &port->subsystems, entry) {
1480 		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1481 				NVMF_NQN_SIZE)) {
1482 			if (!kref_get_unless_zero(&p->subsys->ref))
1483 				break;
1484 			up_read(&nvmet_config_sem);
1485 			return p->subsys;
1486 		}
1487 	}
1488 	up_read(&nvmet_config_sem);
1489 	return NULL;
1490 }
1491 
1492 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1493 		enum nvme_subsys_type type)
1494 {
1495 	struct nvmet_subsys *subsys;
1496 
1497 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1498 	if (!subsys)
1499 		return ERR_PTR(-ENOMEM);
1500 
1501 	subsys->ver = NVMET_DEFAULT_VS;
1502 	/* generate a random serial number as our controllers are ephemeral: */
1503 	get_random_bytes(&subsys->serial, sizeof(subsys->serial));
1504 
1505 	switch (type) {
1506 	case NVME_NQN_NVME:
1507 		subsys->max_qid = NVMET_NR_QUEUES;
1508 		break;
1509 	case NVME_NQN_DISC:
1510 		subsys->max_qid = 0;
1511 		break;
1512 	default:
1513 		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1514 		kfree(subsys);
1515 		return ERR_PTR(-EINVAL);
1516 	}
1517 	subsys->type = type;
1518 	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1519 			GFP_KERNEL);
1520 	if (!subsys->subsysnqn) {
1521 		kfree(subsys);
1522 		return ERR_PTR(-ENOMEM);
1523 	}
1524 	subsys->cntlid_min = NVME_CNTLID_MIN;
1525 	subsys->cntlid_max = NVME_CNTLID_MAX;
1526 	kref_init(&subsys->ref);
1527 
1528 	mutex_init(&subsys->lock);
1529 	xa_init(&subsys->namespaces);
1530 	INIT_LIST_HEAD(&subsys->ctrls);
1531 	INIT_LIST_HEAD(&subsys->hosts);
1532 
1533 	return subsys;
1534 }
1535 
1536 static void nvmet_subsys_free(struct kref *ref)
1537 {
1538 	struct nvmet_subsys *subsys =
1539 		container_of(ref, struct nvmet_subsys, ref);
1540 
1541 	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
1542 
1543 	xa_destroy(&subsys->namespaces);
1544 	nvmet_passthru_subsys_free(subsys);
1545 
1546 	kfree(subsys->subsysnqn);
1547 	kfree(subsys->model_number);
1548 	kfree(subsys);
1549 }
1550 
1551 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1552 {
1553 	struct nvmet_ctrl *ctrl;
1554 
1555 	mutex_lock(&subsys->lock);
1556 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1557 		ctrl->ops->delete_ctrl(ctrl);
1558 	mutex_unlock(&subsys->lock);
1559 }
1560 
1561 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1562 {
1563 	kref_put(&subsys->ref, nvmet_subsys_free);
1564 }
1565 
1566 static int __init nvmet_init(void)
1567 {
1568 	int error;
1569 
1570 	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1571 
1572 	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1573 			WQ_MEM_RECLAIM, 0);
1574 	if (!buffered_io_wq) {
1575 		error = -ENOMEM;
1576 		goto out;
1577 	}
1578 
1579 	error = nvmet_init_discovery();
1580 	if (error)
1581 		goto out_free_work_queue;
1582 
1583 	error = nvmet_init_configfs();
1584 	if (error)
1585 		goto out_exit_discovery;
1586 	return 0;
1587 
1588 out_exit_discovery:
1589 	nvmet_exit_discovery();
1590 out_free_work_queue:
1591 	destroy_workqueue(buffered_io_wq);
1592 out:
1593 	return error;
1594 }
1595 
1596 static void __exit nvmet_exit(void)
1597 {
1598 	nvmet_exit_configfs();
1599 	nvmet_exit_discovery();
1600 	ida_destroy(&cntlid_ida);
1601 	destroy_workqueue(buffered_io_wq);
1602 
1603 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1604 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1605 }
1606 
1607 module_init(nvmet_init);
1608 module_exit(nvmet_exit);
1609 
1610 MODULE_LICENSE("GPL v2");
1611