xref: /openbmc/linux/drivers/nvme/target/core.c (revision f97769fd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common code for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/random.h>
9 #include <linux/rculist.h>
10 #include <linux/pci-p2pdma.h>
11 #include <linux/scatterlist.h>
12 
13 #define CREATE_TRACE_POINTS
14 #include "trace.h"
15 
16 #include "nvmet.h"
17 
18 struct workqueue_struct *buffered_io_wq;
19 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
20 static DEFINE_IDA(cntlid_ida);
21 
22 /*
23  * This read/write semaphore is used to synchronize access to configuration
24  * information on a target system that will result in discovery log page
25  * information change for at least one host.
26  * The full list of resources protected by this semaphore is:
27  *
28  *  - subsystems list
29  *  - per-subsystem allowed hosts list
30  *  - allow_any_host subsystem attribute
31  *  - nvmet_genctr
32  *  - the nvmet_transports array
33  *
34  * When updating any of those lists/structures, the write lock should be
35  * obtained, while readers (populating the discovery log page or checking a
36  * host-subsystem link) take the read lock to allow concurrent reads.
37  */
38 DECLARE_RWSEM(nvmet_config_sem);
39 
40 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
41 u64 nvmet_ana_chgcnt;
42 DECLARE_RWSEM(nvmet_ana_sem);
43 
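/*
 * Map an errno returned by a backend into an NVMe status code and record
 * the offset of the offending command field in req->error_loc.
 */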
44 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
45 {
46 	u16 status;
47 
48 	switch (errno) {
49 	case 0:
50 		status = NVME_SC_SUCCESS;
51 		break;
52 	case -ENOSPC:
53 		req->error_loc = offsetof(struct nvme_rw_command, length);
54 		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
55 		break;
56 	case -EREMOTEIO:
57 		req->error_loc = offsetof(struct nvme_rw_command, slba);
58 		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
59 		break;
60 	case -EOPNOTSUPP:
61 		req->error_loc = offsetof(struct nvme_common_command, opcode);
62 		switch (req->cmd->common.opcode) {
63 		case nvme_cmd_dsm:
64 		case nvme_cmd_write_zeroes:
65 			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
66 			break;
67 		default:
68 			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
69 		}
70 		break;
71 	case -ENODATA:
72 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
73 		status = NVME_SC_ACCESS_DENIED;
74 		break;
75 	case -EIO:
76 		fallthrough;
77 	default:
78 		req->error_loc = offsetof(struct nvme_common_command, opcode);
79 		status = NVME_SC_INTERNAL | NVME_SC_DNR;
80 	}
81 
82 	return status;
83 }
84 
85 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
86 		const char *subsysnqn);
87 
88 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
89 		size_t len)
90 {
91 	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
92 		req->error_loc = offsetof(struct nvme_common_command, dptr);
93 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
94 	}
95 	return 0;
96 }
97 
98 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
99 {
100 	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
101 		req->error_loc = offsetof(struct nvme_common_command, dptr);
102 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
103 	}
104 	return 0;
105 }
106 
107 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
108 {
109 	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
110 		req->error_loc = offsetof(struct nvme_common_command, dptr);
111 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
112 	}
113 	return 0;
114 }
115 
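/*
 * xa_for_each() walks the namespaces xarray in ascending index order, so
 * the last entry visited carries the highest enabled NSID.
 */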
116 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
117 {
118 	unsigned long nsid = 0;
119 	struct nvmet_ns *cur;
120 	unsigned long idx;
121 
122 	xa_for_each(&subsys->namespaces, idx, cur)
123 		nsid = cur->nsid;
124 
125 	return nsid;
126 }
127 
128 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
129 {
130 	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
131 }
132 
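/*
 * Fail all outstanding AER commands with an internal error status.  The
 * controller lock is dropped around each completion because queueing the
 * response calls back into the transport driver.
 */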
133 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
134 {
135 	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
136 	struct nvmet_req *req;
137 
138 	mutex_lock(&ctrl->lock);
139 	while (ctrl->nr_async_event_cmds) {
140 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
141 		mutex_unlock(&ctrl->lock);
142 		nvmet_req_complete(req, status);
143 		mutex_lock(&ctrl->lock);
144 	}
145 	mutex_unlock(&ctrl->lock);
146 }
147 
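/*
 * Pair queued async events with outstanding AER commands, oldest event
 * first, until either runs out.
 */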
148 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
149 {
150 	struct nvmet_async_event *aen;
151 	struct nvmet_req *req;
152 
153 	mutex_lock(&ctrl->lock);
154 	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
155 		aen = list_first_entry(&ctrl->async_events,
156 				       struct nvmet_async_event, entry);
157 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
158 		nvmet_set_result(req, nvmet_async_event_result(aen));
159 
160 		list_del(&aen->entry);
161 		kfree(aen);
162 
163 		mutex_unlock(&ctrl->lock);
164 		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
165 		nvmet_req_complete(req, 0);
166 		mutex_lock(&ctrl->lock);
167 	}
168 	mutex_unlock(&ctrl->lock);
169 }
170 
171 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
172 {
173 	struct nvmet_async_event *aen, *tmp;
174 
175 	mutex_lock(&ctrl->lock);
176 	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
177 		list_del(&aen->entry);
178 		kfree(aen);
179 	}
180 	mutex_unlock(&ctrl->lock);
181 }
182 
183 static void nvmet_async_event_work(struct work_struct *work)
184 {
185 	struct nvmet_ctrl *ctrl =
186 		container_of(work, struct nvmet_ctrl, async_event_work);
187 
188 	nvmet_async_events_process(ctrl);
189 }
190 
191 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
192 		u8 event_info, u8 log_page)
193 {
194 	struct nvmet_async_event *aen;
195 
196 	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
197 	if (!aen)
198 		return;
199 
200 	aen->event_type = event_type;
201 	aen->event_info = event_info;
202 	aen->log_page = log_page;
203 
204 	mutex_lock(&ctrl->lock);
205 	list_add_tail(&aen->entry, &ctrl->async_events);
206 	mutex_unlock(&ctrl->lock);
207 
208 	schedule_work(&ctrl->async_event_work);
209 }
210 
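/*
 * Record a namespace ID in the controller's Changed Namespace List log.
 * If more than NVME_MAX_CHANGED_NAMESPACES entries would be needed, the
 * log collapses to the single catch-all entry 0xffffffff, as the spec
 * requires.
 */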
211 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
212 {
213 	u32 i;
214 
215 	mutex_lock(&ctrl->lock);
216 	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
217 		goto out_unlock;
218 
219 	for (i = 0; i < ctrl->nr_changed_ns; i++) {
220 		if (ctrl->changed_ns_list[i] == nsid)
221 			goto out_unlock;
222 	}
223 
224 	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
225 		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
226 		ctrl->nr_changed_ns = U32_MAX;
227 		goto out_unlock;
228 	}
229 
230 	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
231 out_unlock:
232 	mutex_unlock(&ctrl->lock);
233 }
234 
235 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
236 {
237 	struct nvmet_ctrl *ctrl;
238 
239 	lockdep_assert_held(&subsys->lock);
240 
241 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
242 		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
243 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
244 			continue;
245 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
246 				NVME_AER_NOTICE_NS_CHANGED,
247 				NVME_LOG_CHANGED_NS);
248 	}
249 }
250 
251 void nvmet_send_ana_event(struct nvmet_subsys *subsys,
252 		struct nvmet_port *port)
253 {
254 	struct nvmet_ctrl *ctrl;
255 
256 	mutex_lock(&subsys->lock);
257 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
258 		if (port && ctrl->port != port)
259 			continue;
260 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
261 			continue;
262 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
263 				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
264 	}
265 	mutex_unlock(&subsys->lock);
266 }
267 
268 void nvmet_port_send_ana_event(struct nvmet_port *port)
269 {
270 	struct nvmet_subsys_link *p;
271 
272 	down_read(&nvmet_config_sem);
273 	list_for_each_entry(p, &port->subsystems, entry)
274 		nvmet_send_ana_event(p->subsys, port);
275 	up_read(&nvmet_config_sem);
276 }
277 
278 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
279 {
280 	int ret = 0;
281 
282 	down_write(&nvmet_config_sem);
283 	if (nvmet_transports[ops->type])
284 		ret = -EINVAL;
285 	else
286 		nvmet_transports[ops->type] = ops;
287 	up_write(&nvmet_config_sem);
288 
289 	return ret;
290 }
291 EXPORT_SYMBOL_GPL(nvmet_register_transport);
292 
293 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
294 {
295 	down_write(&nvmet_config_sem);
296 	nvmet_transports[ops->type] = NULL;
297 	up_write(&nvmet_config_sem);
298 }
299 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
300 
301 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
302 {
303 	struct nvmet_ctrl *ctrl;
304 
305 	mutex_lock(&subsys->lock);
306 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
307 		if (ctrl->port == port)
308 			ctrl->ops->delete_ctrl(ctrl);
309 	}
310 	mutex_unlock(&subsys->lock);
311 }
312 
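/*
 * Look up the fabrics transport for the port's trtype, loading the
 * transport module on demand.  nvmet_config_sem is dropped around
 * request_module() because the module's init path re-takes it via
 * nvmet_register_transport().
 */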
313 int nvmet_enable_port(struct nvmet_port *port)
314 {
315 	const struct nvmet_fabrics_ops *ops;
316 	int ret;
317 
318 	lockdep_assert_held(&nvmet_config_sem);
319 
320 	ops = nvmet_transports[port->disc_addr.trtype];
321 	if (!ops) {
322 		up_write(&nvmet_config_sem);
323 		request_module("nvmet-transport-%d", port->disc_addr.trtype);
324 		down_write(&nvmet_config_sem);
325 		ops = nvmet_transports[port->disc_addr.trtype];
326 		if (!ops) {
327 			pr_err("transport type %d not supported\n",
328 				port->disc_addr.trtype);
329 			return -EINVAL;
330 		}
331 	}
332 
333 	if (!try_module_get(ops->owner))
334 		return -EINVAL;
335 
336 	/*
337 	 * If the user requested PI support and the transport isn't pi capable,
338 	 * don't enable the port.
339 	 */
340 	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
341 		pr_err("T10-PI is not supported by transport type %d\n",
342 		       port->disc_addr.trtype);
343 		ret = -EINVAL;
344 		goto out_put;
345 	}
346 
347 	ret = ops->add_port(port);
348 	if (ret)
349 		goto out_put;
350 
351 	/* If the transport didn't set inline_data_size, then disable it. */
352 	if (port->inline_data_size < 0)
353 		port->inline_data_size = 0;
354 
355 	port->enabled = true;
356 	port->tr_ops = ops;
357 	return 0;
358 
359 out_put:
360 	module_put(ops->owner);
361 	return ret;
362 }
363 
364 void nvmet_disable_port(struct nvmet_port *port)
365 {
366 	const struct nvmet_fabrics_ops *ops;
367 
368 	lockdep_assert_held(&nvmet_config_sem);
369 
370 	port->enabled = false;
371 	port->tr_ops = NULL;
372 
373 	ops = nvmet_transports[port->disc_addr.trtype];
374 	ops->remove_port(port);
375 	module_put(ops->owner);
376 }
377 
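/*
 * Traffic-based keep-alive: if any command was seen during the last KATO
 * interval, simply re-arm the timer; otherwise treat the host as dead and
 * raise a fatal controller error.
 */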
378 static void nvmet_keep_alive_timer(struct work_struct *work)
379 {
380 	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
381 			struct nvmet_ctrl, ka_work);
382 	bool cmd_seen = ctrl->cmd_seen;
383 
384 	ctrl->cmd_seen = false;
385 	if (cmd_seen) {
386 		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
387 			ctrl->cntlid);
388 		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
389 		return;
390 	}
391 
392 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
393 		ctrl->cntlid, ctrl->kato);
394 
395 	nvmet_ctrl_fatal_error(ctrl);
396 }
397 
398 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
399 {
400 	if (unlikely(ctrl->kato == 0))
401 		return;
402 
403 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
404 		ctrl->cntlid, ctrl->kato);
405 
406 	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
407 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
408 }
409 
410 static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
411 {
412 	if (unlikely(ctrl->kato == 0))
413 		return;
414 
415 	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
416 
417 	cancel_delayed_work_sync(&ctrl->ka_work);
418 }
419 
420 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
421 {
422 	struct nvmet_ns *ns;
423 
424 	ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
425 	if (ns)
426 		percpu_ref_get(&ns->ref);
427 
428 	return ns;
429 }
430 
431 static void nvmet_destroy_namespace(struct percpu_ref *ref)
432 {
433 	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
434 
435 	complete(&ns->disable_done);
436 }
437 
438 void nvmet_put_namespace(struct nvmet_ns *ns)
439 {
440 	percpu_ref_put(&ns->ref);
441 }
442 
443 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
444 {
445 	nvmet_bdev_ns_disable(ns);
446 	nvmet_file_ns_disable(ns);
447 }
448 
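/*
 * Check that peer-to-peer memory can be used with this namespace: it must
 * be backed by a block device whose driver supports P2P DMA, and either
 * the explicitly configured p2p_dev or some compatible p2pmem provider
 * must be available.
 */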
449 static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
450 {
451 	int ret;
452 	struct pci_dev *p2p_dev;
453 
454 	if (!ns->use_p2pmem)
455 		return 0;
456 
457 	if (!ns->bdev) {
458 		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
459 		return -EINVAL;
460 	}
461 
462 	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
463 		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
464 		       ns->device_path);
465 		return -EINVAL;
466 	}
467 
468 	if (ns->p2p_dev) {
469 		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
470 		if (ret < 0)
471 			return -EINVAL;
472 	} else {
473 		/*
474 		 * Right now we just check that there is p2pmem available so
475 		 * we can report an error to the user right away if there
476 		 * is not. We'll find the actual device to use once we
477 		 * setup the controller when the port's device is available.
478 		 */
479 
480 		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
481 		if (!p2p_dev) {
482 			pr_err("no peer-to-peer memory is available for %s\n",
483 			       ns->device_path);
484 			return -EINVAL;
485 		}
486 
487 		pci_dev_put(p2p_dev);
488 	}
489 
490 	return 0;
491 }
492 
493 /*
494  * Note: ctrl->subsys->lock should be held when calling this function
495  */
496 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
497 				    struct nvmet_ns *ns)
498 {
499 	struct device *clients[2];
500 	struct pci_dev *p2p_dev;
501 	int ret;
502 
503 	if (!ctrl->p2p_client || !ns->use_p2pmem)
504 		return;
505 
506 	if (ns->p2p_dev) {
507 		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
508 		if (ret < 0)
509 			return;
510 
511 		p2p_dev = pci_dev_get(ns->p2p_dev);
512 	} else {
513 		clients[0] = ctrl->p2p_client;
514 		clients[1] = nvmet_ns_dev(ns);
515 
516 		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
517 		if (!p2p_dev) {
518 			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
519 			       dev_name(ctrl->p2p_client), ns->device_path);
520 			return;
521 		}
522 	}
523 
524 	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
525 	if (ret < 0) {
526 		pci_dev_put(p2p_dev);
527 		return;
528 	}
529 	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev), ns->nsid);
530 }
531 
532 void nvmet_ns_revalidate(struct nvmet_ns *ns)
533 {
534 	loff_t oldsize = ns->size;
535 
536 	if (ns->bdev)
537 		nvmet_bdev_ns_revalidate(ns);
538 	else
539 		nvmet_file_ns_revalidate(ns);
540 
541 	if (oldsize != ns->size)
542 		nvmet_ns_changed(ns->subsys, ns->nsid);
543 }
544 
545 int nvmet_ns_enable(struct nvmet_ns *ns)
546 {
547 	struct nvmet_subsys *subsys = ns->subsys;
548 	struct nvmet_ctrl *ctrl;
549 	int ret;
550 
551 	mutex_lock(&subsys->lock);
552 	ret = 0;
553 
554 	if (nvmet_passthru_ctrl(subsys)) {
555 		pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
556 		goto out_unlock;
557 	}
558 
559 	if (ns->enabled)
560 		goto out_unlock;
561 
562 	ret = -EMFILE;
563 	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
564 		goto out_unlock;
565 
566 	ret = nvmet_bdev_ns_enable(ns);
567 	if (ret == -ENOTBLK)
568 		ret = nvmet_file_ns_enable(ns);
569 	if (ret)
570 		goto out_unlock;
571 
572 	ret = nvmet_p2pmem_ns_enable(ns);
573 	if (ret)
574 		goto out_dev_disable;
575 
576 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
577 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
578 
579 	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
580 				0, GFP_KERNEL);
581 	if (ret)
582 		goto out_dev_put;
583 
584 	if (ns->nsid > subsys->max_nsid)
585 		subsys->max_nsid = ns->nsid;
586 
587 	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
588 	if (ret)
589 		goto out_restore_subsys_maxnsid;
590 
591 	subsys->nr_namespaces++;
592 
593 	nvmet_ns_changed(subsys, ns->nsid);
594 	ns->enabled = true;
595 	ret = 0;
596 out_unlock:
597 	mutex_unlock(&subsys->lock);
598 	return ret;
599 
600 out_restore_subsys_maxnsid:
601 	subsys->max_nsid = nvmet_max_nsid(subsys);
602 	percpu_ref_exit(&ns->ref);
603 out_dev_put:
604 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
605 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
606 out_dev_disable:
607 	nvmet_ns_dev_disable(ns);
608 	goto out_unlock;
609 }
610 
611 void nvmet_ns_disable(struct nvmet_ns *ns)
612 {
613 	struct nvmet_subsys *subsys = ns->subsys;
614 	struct nvmet_ctrl *ctrl;
615 
616 	mutex_lock(&subsys->lock);
617 	if (!ns->enabled)
618 		goto out_unlock;
619 
620 	ns->enabled = false;
621 	xa_erase(&ns->subsys->namespaces, ns->nsid);
622 	if (ns->nsid == subsys->max_nsid)
623 		subsys->max_nsid = nvmet_max_nsid(subsys);
624 
625 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
626 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
627 
628 	mutex_unlock(&subsys->lock);
629 
630 	/*
631 	 * Now that we removed the namespaces from the lookup list, we
632 	 * can kill the per_cpu ref and wait for any remaining references
633 	 * to be dropped, as well as a RCU grace period for anyone only
634 	 * using the namespace under rcu_read_lock().  Note that we can't
635 	 * use call_rcu here as we need to ensure the namespaces have
636 	 * been fully destroyed before unloading the module.
637 	 */
638 	percpu_ref_kill(&ns->ref);
639 	synchronize_rcu();
640 	wait_for_completion(&ns->disable_done);
641 	percpu_ref_exit(&ns->ref);
642 
643 	mutex_lock(&subsys->lock);
644 
645 	subsys->nr_namespaces--;
646 	nvmet_ns_changed(subsys, ns->nsid);
647 	nvmet_ns_dev_disable(ns);
648 out_unlock:
649 	mutex_unlock(&subsys->lock);
650 }
651 
652 void nvmet_ns_free(struct nvmet_ns *ns)
653 {
654 	nvmet_ns_disable(ns);
655 
656 	down_write(&nvmet_ana_sem);
657 	nvmet_ana_group_enabled[ns->anagrpid]--;
658 	up_write(&nvmet_ana_sem);
659 
660 	kfree(ns->device_path);
661 	kfree(ns);
662 }
663 
664 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
665 {
666 	struct nvmet_ns *ns;
667 
668 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
669 	if (!ns)
670 		return NULL;
671 
672 	init_completion(&ns->disable_done);
673 
674 	ns->nsid = nsid;
675 	ns->subsys = subsys;
676 
677 	down_write(&nvmet_ana_sem);
678 	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
679 	nvmet_ana_group_enabled[ns->anagrpid]++;
680 	up_write(&nvmet_ana_sem);
681 
682 	uuid_gen(&ns->uuid);
683 	ns->buffered_io = false;
684 
685 	return ns;
686 }
687 
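/*
 * Advance the submission queue head pointer lock-free; cmpxchg() retries
 * until the increment lands, since completions may race on the same queue.
 */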
688 static void nvmet_update_sq_head(struct nvmet_req *req)
689 {
690 	if (req->sq->size) {
691 		u32 old_sqhd, new_sqhd;
692 
693 		do {
694 			old_sqhd = req->sq->sqhd;
695 			new_sqhd = (old_sqhd + 1) % req->sq->size;
696 		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
697 					old_sqhd);
698 	}
699 	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
700 }
701 
702 static void nvmet_set_error(struct nvmet_req *req, u16 status)
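/*
 * Set the CQE status field and, when the error location is known, append
 * an entry to the controller's Error Information log and set the "more"
 * bit so the host knows additional error data is available.
 */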
703 {
704 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
705 	struct nvme_error_slot *new_error_slot;
706 	unsigned long flags;
707 
708 	req->cqe->status = cpu_to_le16(status << 1);
709 
710 	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
711 		return;
712 
713 	spin_lock_irqsave(&ctrl->error_lock, flags);
714 	ctrl->err_counter++;
715 	new_error_slot =
716 		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
717 
718 	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
719 	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
720 	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
721 	new_error_slot->status_field = cpu_to_le16(status << 1);
722 	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
723 	new_error_slot->lba = cpu_to_le64(req->error_slba);
724 	new_error_slot->nsid = req->cmd->common.nsid;
725 	spin_unlock_irqrestore(&ctrl->error_lock, flags);
726 
727 	/* set the more bit for this request */
728 	req->cqe->status |= cpu_to_le16(1 << 14);
729 }
730 
731 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
732 {
733 	if (!req->sq->sqhd_disabled)
734 		nvmet_update_sq_head(req);
735 	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
736 	req->cqe->command_id = req->cmd->common.command_id;
737 
738 	if (unlikely(status))
739 		nvmet_set_error(req, status);
740 
741 	trace_nvmet_req_complete(req);
742 
743 	if (req->ns)
744 		nvmet_put_namespace(req->ns);
745 	req->ops->queue_response(req);
746 }
747 
748 void nvmet_req_complete(struct nvmet_req *req, u16 status)
749 {
750 	__nvmet_req_complete(req, status);
751 	percpu_ref_put(&req->sq->ref);
752 }
753 EXPORT_SYMBOL_GPL(nvmet_req_complete);
754 
755 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
756 		u16 qid, u16 size)
757 {
758 	cq->qid = qid;
759 	cq->size = size;
760 
761 	ctrl->cqs[qid] = cq;
762 }
763 
764 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
765 		u16 qid, u16 size)
766 {
767 	sq->sqhd = 0;
768 	sq->qid = qid;
769 	sq->size = size;
770 
771 	ctrl->sqs[qid] = sq;
772 }
773 
774 static void nvmet_confirm_sq(struct percpu_ref *ref)
775 {
776 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
777 
778 	complete(&sq->confirm_done);
779 }
780 
781 void nvmet_sq_destroy(struct nvmet_sq *sq)
782 {
783 	struct nvmet_ctrl *ctrl = sq->ctrl;
784 
785 	/*
786 	 * If this is the admin queue, complete all AERs so that our
787 	 * queue doesn't have outstanding requests on it.
788 	 */
789 	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
790 		nvmet_async_events_failall(ctrl);
791 	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
792 	wait_for_completion(&sq->confirm_done);
793 	wait_for_completion(&sq->free_done);
794 	percpu_ref_exit(&sq->ref);
795 
796 	if (ctrl) {
797 		nvmet_ctrl_put(ctrl);
798 		sq->ctrl = NULL; /* allows reusing the queue later */
799 	}
800 }
801 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
802 
803 static void nvmet_sq_free(struct percpu_ref *ref)
804 {
805 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
806 
807 	complete(&sq->free_done);
808 }
809 
810 int nvmet_sq_init(struct nvmet_sq *sq)
811 {
812 	int ret;
813 
814 	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
815 	if (ret) {
816 		pr_err("percpu_ref init failed!\n");
817 		return ret;
818 	}
819 	init_completion(&sq->free_done);
820 	init_completion(&sq->confirm_done);
821 
822 	return 0;
823 }
824 EXPORT_SYMBOL_GPL(nvmet_sq_init);
825 
826 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
827 		struct nvmet_ns *ns)
828 {
829 	enum nvme_ana_state state = port->ana_state[ns->anagrpid];
830 
831 	if (unlikely(state == NVME_ANA_INACCESSIBLE))
832 		return NVME_SC_ANA_INACCESSIBLE;
833 	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
834 		return NVME_SC_ANA_PERSISTENT_LOSS;
835 	if (unlikely(state == NVME_ANA_CHANGE))
836 		return NVME_SC_ANA_TRANSITION;
837 	return 0;
838 }
839 
840 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
841 {
842 	if (unlikely(req->ns->readonly)) {
843 		switch (req->cmd->common.opcode) {
844 		case nvme_cmd_read:
845 		case nvme_cmd_flush:
846 			break;
847 		default:
848 			return NVME_SC_NS_WRITE_PROTECTED;
849 		}
850 	}
851 
852 	return 0;
853 }
854 
855 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
856 {
857 	struct nvme_command *cmd = req->cmd;
858 	u16 ret;
859 
860 	ret = nvmet_check_ctrl_status(req, cmd);
861 	if (unlikely(ret))
862 		return ret;
863 
864 	if (nvmet_req_passthru_ctrl(req))
865 		return nvmet_parse_passthru_io_cmd(req);
866 
867 	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
868 	if (unlikely(!req->ns)) {
869 		req->error_loc = offsetof(struct nvme_common_command, nsid);
870 		return NVME_SC_INVALID_NS | NVME_SC_DNR;
871 	}
872 	ret = nvmet_check_ana_state(req->port, req->ns);
873 	if (unlikely(ret)) {
874 		req->error_loc = offsetof(struct nvme_common_command, nsid);
875 		return ret;
876 	}
877 	ret = nvmet_io_cmd_check_access(req);
878 	if (unlikely(ret)) {
879 		req->error_loc = offsetof(struct nvme_common_command, nsid);
880 		return ret;
881 	}
882 
883 	if (req->ns->file)
884 		return nvmet_file_parse_io_cmd(req);
885 	else
886 		return nvmet_bdev_parse_io_cmd(req);
887 }
888 
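/*
 * Common entry point used by all transports: initialize the request,
 * parse the command and take a reference on the submission queue.
 * Returns false, after completing the request with an error, if the
 * command cannot be executed.
 */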
889 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
890 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
891 {
892 	u8 flags = req->cmd->common.flags;
893 	u16 status;
894 
895 	req->cq = cq;
896 	req->sq = sq;
897 	req->ops = ops;
898 	req->sg = NULL;
899 	req->metadata_sg = NULL;
900 	req->sg_cnt = 0;
901 	req->metadata_sg_cnt = 0;
902 	req->transfer_len = 0;
903 	req->metadata_len = 0;
904 	req->cqe->status = 0;
905 	req->cqe->sq_head = 0;
906 	req->ns = NULL;
907 	req->error_loc = NVMET_NO_ERROR_LOC;
908 	req->error_slba = 0;
909 
910 	trace_nvmet_req_init(req, req->cmd);
911 
912 	/* no support for fused commands yet */
913 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
914 		req->error_loc = offsetof(struct nvme_common_command, flags);
915 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
916 		goto fail;
917 	}
918 
919 	/*
920 	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
921 	 * contains an address of a single contiguous physical buffer that is
922 	 * byte aligned.
923 	 */
924 	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
925 		req->error_loc = offsetof(struct nvme_common_command, flags);
926 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
927 		goto fail;
928 	}
929 
930 	if (unlikely(!req->sq->ctrl))
931 		/* will return an error for any non-connect command: */
932 		status = nvmet_parse_connect_cmd(req);
933 	else if (likely(req->sq->qid != 0))
934 		status = nvmet_parse_io_cmd(req);
935 	else
936 		status = nvmet_parse_admin_cmd(req);
937 
938 	if (status)
939 		goto fail;
940 
941 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
942 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
943 		goto fail;
944 	}
945 
946 	if (sq->ctrl)
947 		sq->ctrl->cmd_seen = true;
948 
949 	return true;
950 
951 fail:
952 	__nvmet_req_complete(req, status);
953 	return false;
954 }
955 EXPORT_SYMBOL_GPL(nvmet_req_init);
956 
957 void nvmet_req_uninit(struct nvmet_req *req)
958 {
959 	percpu_ref_put(&req->sq->ref);
960 	if (req->ns)
961 		nvmet_put_namespace(req->ns);
962 }
963 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
964 
965 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
966 {
967 	if (unlikely(len != req->transfer_len)) {
968 		req->error_loc = offsetof(struct nvme_common_command, dptr);
969 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
970 		return false;
971 	}
972 
973 	return true;
974 }
975 EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
976 
977 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
978 {
979 	if (unlikely(data_len > req->transfer_len)) {
980 		req->error_loc = offsetof(struct nvme_common_command, dptr);
981 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
982 		return false;
983 	}
984 
985 	return true;
986 }
987 
988 static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
989 {
990 	return req->transfer_len - req->metadata_len;
991 }
992 
993 static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
994 {
995 	req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
996 			nvmet_data_transfer_len(req));
997 	if (!req->sg)
998 		goto out_err;
999 
1000 	if (req->metadata_len) {
1001 		req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
1002 				&req->metadata_sg_cnt, req->metadata_len);
1003 		if (!req->metadata_sg)
1004 			goto out_free_sg;
1005 	}
1006 	return 0;
1007 out_free_sg:
1008 	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1009 out_err:
1010 	return -ENOMEM;
1011 }
1012 
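/*
 * For I/O commands on a namespace that has a P2P memory device mapped for
 * this controller, use that device for the data buffers.
 */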
1013 static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
1014 {
1015 	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
1016 		return false;
1017 
1018 	if (req->sq->ctrl && req->sq->qid && req->ns) {
1019 		req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
1020 						 req->ns->nsid);
1021 		if (req->p2p_dev)
1022 			return true;
1023 	}
1024 
1025 	req->p2p_dev = NULL;
1026 	return false;
1027 }
1028 
1029 int nvmet_req_alloc_sgls(struct nvmet_req *req)
1030 {
1031 	if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
1032 		return 0;
1033 
1034 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
1035 			    &req->sg_cnt);
1036 	if (unlikely(!req->sg))
1037 		goto out;
1038 
1039 	if (req->metadata_len) {
1040 		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
1041 					     &req->metadata_sg_cnt);
1042 		if (unlikely(!req->metadata_sg))
1043 			goto out_free;
1044 	}
1045 
1046 	return 0;
1047 out_free:
1048 	sgl_free(req->sg);
1049 out:
1050 	return -ENOMEM;
1051 }
1052 EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
1053 
1054 void nvmet_req_free_sgls(struct nvmet_req *req)
1055 {
1056 	if (req->p2p_dev) {
1057 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1058 		if (req->metadata_sg)
1059 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
1060 	} else {
1061 		sgl_free(req->sg);
1062 		if (req->metadata_sg)
1063 			sgl_free(req->metadata_sg);
1064 	}
1065 
1066 	req->sg = NULL;
1067 	req->metadata_sg = NULL;
1068 	req->sg_cnt = 0;
1069 	req->metadata_sg_cnt = 0;
1070 }
1071 EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
1072 
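/*
 * Helpers to extract the individual fields of the Controller Configuration
 * (CC) register value written by the host.
 */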
1073 static inline bool nvmet_cc_en(u32 cc)
1074 {
1075 	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
1076 }
1077 
1078 static inline u8 nvmet_cc_css(u32 cc)
1079 {
1080 	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
1081 }
1082 
1083 static inline u8 nvmet_cc_mps(u32 cc)
1084 {
1085 	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
1086 }
1087 
1088 static inline u8 nvmet_cc_ams(u32 cc)
1089 {
1090 	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
1091 }
1092 
1093 static inline u8 nvmet_cc_shn(u32 cc)
1094 {
1095 	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
1096 }
1097 
1098 static inline u8 nvmet_cc_iosqes(u32 cc)
1099 {
1100 	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
1101 }
1102 
1103 static inline u8 nvmet_cc_iocqes(u32 cc)
1104 {
1105 	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
1106 }
1107 
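/*
 * Validate the CC register written by the host and transition the
 * controller to ready; any unsupported field value sets CSTS.CFS instead.
 */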
1108 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1109 {
1110 	lockdep_assert_held(&ctrl->lock);
1111 
1112 	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1113 	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
1114 	    nvmet_cc_mps(ctrl->cc) != 0 ||
1115 	    nvmet_cc_ams(ctrl->cc) != 0 ||
1116 	    nvmet_cc_css(ctrl->cc) != 0) {
1117 		ctrl->csts = NVME_CSTS_CFS;
1118 		return;
1119 	}
1120 
1121 	ctrl->csts = NVME_CSTS_RDY;
1122 
1123 	/*
1124 	 * Controllers that are not yet enabled should not really enforce the
1125 	 * keep alive timeout, but we still want to track a timeout and cleanup
1126 	 * in case a host died before it enabled the controller.  Hence, simply
1127 	 * reset the keep alive timer when the controller is enabled.
1128 	 */
1129 	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
1130 }
1131 
1132 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1133 {
1134 	lockdep_assert_held(&ctrl->lock);
1135 
1136 	/* XXX: tear down queues? */
1137 	ctrl->csts &= ~NVME_CSTS_RDY;
1138 	ctrl->cc = 0;
1139 }
1140 
1141 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1142 {
1143 	u32 old;
1144 
1145 	mutex_lock(&ctrl->lock);
1146 	old = ctrl->cc;
1147 	ctrl->cc = new;
1148 
1149 	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1150 		nvmet_start_ctrl(ctrl);
1151 	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1152 		nvmet_clear_ctrl(ctrl);
1153 	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1154 		nvmet_clear_ctrl(ctrl);
1155 		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1156 	}
1157 	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1158 		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1159 	mutex_unlock(&ctrl->lock);
1160 }
1161 
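/* Build the read-only Controller Capabilities (CAP) register value. */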
1162 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1163 {
1164 	/* command sets supported: NVMe command set: */
1165 	ctrl->cap = (1ULL << 37);
1166 	/* CC.EN timeout in 500msec units: */
1167 	ctrl->cap |= (15ULL << 24);
1168 	/* maximum queue entries supported: */
1169 	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
1170 }
1171 
1172 u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
1173 		struct nvmet_req *req, struct nvmet_ctrl **ret)
1174 {
1175 	struct nvmet_subsys *subsys;
1176 	struct nvmet_ctrl *ctrl;
1177 	u16 status = 0;
1178 
1179 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1180 	if (!subsys) {
1181 		pr_warn("connect request for invalid subsystem %s!\n",
1182 			subsysnqn);
1183 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1184 		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1185 	}
1186 
1187 	mutex_lock(&subsys->lock);
1188 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1189 		if (ctrl->cntlid == cntlid) {
1190 			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1191 				pr_warn("hostnqn mismatch.\n");
1192 				continue;
1193 			}
1194 			if (!kref_get_unless_zero(&ctrl->ref))
1195 				continue;
1196 
1197 			*ret = ctrl;
1198 			goto out;
1199 		}
1200 	}
1201 
1202 	pr_warn("could not find controller %d for subsys %s / host %s\n",
1203 		cntlid, subsysnqn, hostnqn);
1204 	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1205 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1206 
1207 out:
1208 	mutex_unlock(&subsys->lock);
1209 	nvmet_subsys_put(subsys);
1210 	return status;
1211 }
1212 
1213 u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
1214 {
1215 	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1216 		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
1217 		       cmd->common.opcode, req->sq->qid);
1218 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1219 	}
1220 
1221 	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1222 		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
1223 		       cmd->common.opcode, req->sq->qid);
1224 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1225 	}
1226 	return 0;
1227 }
1228 
1229 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1230 {
1231 	struct nvmet_host_link *p;
1232 
1233 	lockdep_assert_held(&nvmet_config_sem);
1234 
1235 	if (subsys->allow_any_host)
1236 		return true;
1237 
1238 	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
1239 		return true;
1240 
1241 	list_for_each_entry(p, &subsys->hosts, entry) {
1242 		if (!strcmp(nvmet_host_name(p->host), hostnqn))
1243 			return true;
1244 	}
1245 
1246 	return false;
1247 }
1248 
1249 /*
1250  * Note: ctrl->subsys->lock should be held when calling this function
1251  */
1252 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1253 		struct nvmet_req *req)
1254 {
1255 	struct nvmet_ns *ns;
1256 	unsigned long idx;
1257 
1258 	if (!req->p2p_client)
1259 		return;
1260 
1261 	ctrl->p2p_client = get_device(req->p2p_client);
1262 
1263 	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
1264 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
1265 }
1266 
1267 /*
1268  * Note: ctrl->subsys->lock should be held when calling this function
1269  */
1270 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1271 {
1272 	struct radix_tree_iter iter;
1273 	void __rcu **slot;
1274 
1275 	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1276 		pci_dev_put(radix_tree_deref_slot(slot));
1277 
1278 	put_device(ctrl->p2p_client);
1279 }
1280 
1281 static void nvmet_fatal_error_handler(struct work_struct *work)
1282 {
1283 	struct nvmet_ctrl *ctrl =
1284 			container_of(work, struct nvmet_ctrl, fatal_err_work);
1285 
1286 	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1287 	ctrl->ops->delete_ctrl(ctrl);
1288 }
1289 
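/*
 * Allocate and initialize a controller in response to an admin-queue
 * connect: verify the host is allowed to access the subsystem, allocate a
 * cntlid from the subsystem's range and start the keep-alive timer.
 */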
1290 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1291 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1292 {
1293 	struct nvmet_subsys *subsys;
1294 	struct nvmet_ctrl *ctrl;
1295 	int ret;
1296 	u16 status;
1297 
1298 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1299 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1300 	if (!subsys) {
1301 		pr_warn("connect request for invalid subsystem %s!\n",
1302 			subsysnqn);
1303 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1304 		goto out;
1305 	}
1306 
1307 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1308 	down_read(&nvmet_config_sem);
1309 	if (!nvmet_host_allowed(subsys, hostnqn)) {
1310 		pr_info("connect by host %s for subsystem %s not allowed\n",
1311 			hostnqn, subsysnqn);
1312 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1313 		up_read(&nvmet_config_sem);
1314 		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
1315 		goto out_put_subsystem;
1316 	}
1317 	up_read(&nvmet_config_sem);
1318 
1319 	status = NVME_SC_INTERNAL;
1320 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1321 	if (!ctrl)
1322 		goto out_put_subsystem;
1323 	mutex_init(&ctrl->lock);
1324 
1325 	nvmet_init_cap(ctrl);
1326 
1327 	ctrl->port = req->port;
1328 
1329 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1330 	INIT_LIST_HEAD(&ctrl->async_events);
1331 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1332 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1333 
1334 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1335 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1336 
1337 	kref_init(&ctrl->ref);
1338 	ctrl->subsys = subsys;
1339 	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1340 
1341 	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1342 			sizeof(__le32), GFP_KERNEL);
1343 	if (!ctrl->changed_ns_list)
1344 		goto out_free_ctrl;
1345 
1346 	ctrl->cqs = kcalloc(subsys->max_qid + 1,
1347 			sizeof(struct nvmet_cq *),
1348 			GFP_KERNEL);
1349 	if (!ctrl->cqs)
1350 		goto out_free_changed_ns_list;
1351 
1352 	ctrl->sqs = kcalloc(subsys->max_qid + 1,
1353 			sizeof(struct nvmet_sq *),
1354 			GFP_KERNEL);
1355 	if (!ctrl->sqs)
1356 		goto out_free_cqs;
1357 
1358 	if (subsys->cntlid_min > subsys->cntlid_max)
1359 		goto out_free_cqs;
1360 
1361 	ret = ida_simple_get(&cntlid_ida,
1362 			     subsys->cntlid_min, subsys->cntlid_max,
1363 			     GFP_KERNEL);
1364 	if (ret < 0) {
1365 		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
1366 		goto out_free_sqs;
1367 	}
1368 	ctrl->cntlid = ret;
1369 
1370 	ctrl->ops = req->ops;
1371 
1372 	/*
1373 	 * Discovery controllers may use some arbitrary high value
1374 	 * in order to clean up stale discovery sessions.
1375 	 */
1376 	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
1377 		kato = NVMET_DISC_KATO_MS;
1378 
1379 	/* keep-alive timeout in seconds */
1380 	ctrl->kato = DIV_ROUND_UP(kato, 1000);
1381 
1382 	ctrl->err_counter = 0;
1383 	spin_lock_init(&ctrl->error_lock);
1384 
1385 	nvmet_start_keep_alive_timer(ctrl);
1386 
1387 	mutex_lock(&subsys->lock);
1388 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1389 	nvmet_setup_p2p_ns_map(ctrl, req);
1390 	mutex_unlock(&subsys->lock);
1391 
1392 	*ctrlp = ctrl;
1393 	return 0;
1394 
1395 out_free_sqs:
1396 	kfree(ctrl->sqs);
1397 out_free_cqs:
1398 	kfree(ctrl->cqs);
1399 out_free_changed_ns_list:
1400 	kfree(ctrl->changed_ns_list);
1401 out_free_ctrl:
1402 	kfree(ctrl);
1403 out_put_subsystem:
1404 	nvmet_subsys_put(subsys);
1405 out:
1406 	return status;
1407 }
1408 
1409 static void nvmet_ctrl_free(struct kref *ref)
1410 {
1411 	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1412 	struct nvmet_subsys *subsys = ctrl->subsys;
1413 
1414 	mutex_lock(&subsys->lock);
1415 	nvmet_release_p2p_ns_map(ctrl);
1416 	list_del(&ctrl->subsys_entry);
1417 	mutex_unlock(&subsys->lock);
1418 
1419 	nvmet_stop_keep_alive_timer(ctrl);
1420 
1421 	flush_work(&ctrl->async_event_work);
1422 	cancel_work_sync(&ctrl->fatal_err_work);
1423 
1424 	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
1425 
1426 	nvmet_async_events_free(ctrl);
1427 	kfree(ctrl->sqs);
1428 	kfree(ctrl->cqs);
1429 	kfree(ctrl->changed_ns_list);
1430 	kfree(ctrl);
1431 
1432 	nvmet_subsys_put(subsys);
1433 }
1434 
1435 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1436 {
1437 	kref_put(&ctrl->ref, nvmet_ctrl_free);
1438 }
1439 
1440 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1441 {
1442 	mutex_lock(&ctrl->lock);
1443 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
1444 		ctrl->csts |= NVME_CSTS_CFS;
1445 		schedule_work(&ctrl->fatal_err_work);
1446 	}
1447 	mutex_unlock(&ctrl->lock);
1448 }
1449 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1450 
1451 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1452 		const char *subsysnqn)
1453 {
1454 	struct nvmet_subsys_link *p;
1455 
1456 	if (!port)
1457 		return NULL;
1458 
1459 	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1460 		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1461 			return NULL;
1462 		return nvmet_disc_subsys;
1463 	}
1464 
1465 	down_read(&nvmet_config_sem);
1466 	list_for_each_entry(p, &port->subsystems, entry) {
1467 		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1468 				NVMF_NQN_SIZE)) {
1469 			if (!kref_get_unless_zero(&p->subsys->ref))
1470 				break;
1471 			up_read(&nvmet_config_sem);
1472 			return p->subsys;
1473 		}
1474 	}
1475 	up_read(&nvmet_config_sem);
1476 	return NULL;
1477 }
1478 
1479 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1480 		enum nvme_subsys_type type)
1481 {
1482 	struct nvmet_subsys *subsys;
1483 
1484 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1485 	if (!subsys)
1486 		return ERR_PTR(-ENOMEM);
1487 
1488 	subsys->ver = NVMET_DEFAULT_VS;
1489 	/* generate a random serial number as our controllers are ephemeral: */
1490 	get_random_bytes(&subsys->serial, sizeof(subsys->serial));
1491 
1492 	switch (type) {
1493 	case NVME_NQN_NVME:
1494 		subsys->max_qid = NVMET_NR_QUEUES;
1495 		break;
1496 	case NVME_NQN_DISC:
1497 		subsys->max_qid = 0;
1498 		break;
1499 	default:
1500 		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1501 		kfree(subsys);
1502 		return ERR_PTR(-EINVAL);
1503 	}
1504 	subsys->type = type;
1505 	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1506 			GFP_KERNEL);
1507 	if (!subsys->subsysnqn) {
1508 		kfree(subsys);
1509 		return ERR_PTR(-ENOMEM);
1510 	}
1511 	subsys->cntlid_min = NVME_CNTLID_MIN;
1512 	subsys->cntlid_max = NVME_CNTLID_MAX;
1513 	kref_init(&subsys->ref);
1514 
1515 	mutex_init(&subsys->lock);
1516 	xa_init(&subsys->namespaces);
1517 	INIT_LIST_HEAD(&subsys->ctrls);
1518 	INIT_LIST_HEAD(&subsys->hosts);
1519 
1520 	return subsys;
1521 }
1522 
1523 static void nvmet_subsys_free(struct kref *ref)
1524 {
1525 	struct nvmet_subsys *subsys =
1526 		container_of(ref, struct nvmet_subsys, ref);
1527 
1528 	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
1529 
1530 	xa_destroy(&subsys->namespaces);
1531 	nvmet_passthru_subsys_free(subsys);
1532 
1533 	kfree(subsys->subsysnqn);
1534 	kfree_rcu(subsys->model, rcuhead);
1535 	kfree(subsys);
1536 }
1537 
1538 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1539 {
1540 	struct nvmet_ctrl *ctrl;
1541 
1542 	mutex_lock(&subsys->lock);
1543 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1544 		ctrl->ops->delete_ctrl(ctrl);
1545 	mutex_unlock(&subsys->lock);
1546 }
1547 
1548 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1549 {
1550 	kref_put(&subsys->ref, nvmet_subsys_free);
1551 }
1552 
1553 static int __init nvmet_init(void)
1554 {
1555 	int error;
1556 
1557 	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1558 
1559 	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1560 			WQ_MEM_RECLAIM, 0);
1561 	if (!buffered_io_wq) {
1562 		error = -ENOMEM;
1563 		goto out;
1564 	}
1565 
1566 	error = nvmet_init_discovery();
1567 	if (error)
1568 		goto out_free_work_queue;
1569 
1570 	error = nvmet_init_configfs();
1571 	if (error)
1572 		goto out_exit_discovery;
1573 	return 0;
1574 
1575 out_exit_discovery:
1576 	nvmet_exit_discovery();
1577 out_free_work_queue:
1578 	destroy_workqueue(buffered_io_wq);
1579 out:
1580 	return error;
1581 }
1582 
1583 static void __exit nvmet_exit(void)
1584 {
1585 	nvmet_exit_configfs();
1586 	nvmet_exit_discovery();
1587 	ida_destroy(&cntlid_ida);
1588 	destroy_workqueue(buffered_io_wq);
1589 
1590 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1591 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1592 }
1593 
1594 module_init(nvmet_init);
1595 module_exit(nvmet_exit);
1596 
1597 MODULE_LICENSE("GPL v2");
1598