xref: /openbmc/linux/drivers/nvme/target/core.c (revision 01c44bf8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common code for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/random.h>
9 #include <linux/rculist.h>
10 #include <linux/pci-p2pdma.h>
11 #include <linux/scatterlist.h>
12 
13 #define CREATE_TRACE_POINTS
14 #include "trace.h"
15 
16 #include "nvmet.h"
17 
18 struct workqueue_struct *buffered_io_wq;
19 struct workqueue_struct *zbd_wq;
20 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
21 static DEFINE_IDA(cntlid_ida);
22 
23 struct workqueue_struct *nvmet_wq;
24 EXPORT_SYMBOL_GPL(nvmet_wq);
25 
26 /*
27  * This read/write semaphore is used to synchronize access to configuration
28  * information on a target system that will result in discovery log page
29  * information change for at least one host.
30  * The full list of resources to protected by this semaphore is:
31  *
32  *  - subsystems list
33  *  - per-subsystem allowed hosts list
34  *  - allow_any_host subsystem attribute
35  *  - nvmet_genctr
36  *  - the nvmet_transports array
37  *
38  * When updating any of those lists/structures write lock should be obtained,
39  * while when reading (popolating discovery log page or checking host-subsystem
40  * link) read lock is obtained to allow concurrent reads.
41  */
42 DECLARE_RWSEM(nvmet_config_sem);
43 
44 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
45 u64 nvmet_ana_chgcnt;
46 DECLARE_RWSEM(nvmet_ana_sem);
47 
48 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
49 {
50 	switch (errno) {
51 	case 0:
52 		return NVME_SC_SUCCESS;
53 	case -ENOSPC:
54 		req->error_loc = offsetof(struct nvme_rw_command, length);
55 		return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
56 	case -EREMOTEIO:
57 		req->error_loc = offsetof(struct nvme_rw_command, slba);
58 		return  NVME_SC_LBA_RANGE | NVME_SC_DNR;
59 	case -EOPNOTSUPP:
60 		req->error_loc = offsetof(struct nvme_common_command, opcode);
61 		switch (req->cmd->common.opcode) {
62 		case nvme_cmd_dsm:
63 		case nvme_cmd_write_zeroes:
64 			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
65 		default:
66 			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
67 		}
68 		break;
69 	case -ENODATA:
70 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
71 		return NVME_SC_ACCESS_DENIED;
72 	case -EIO:
73 		fallthrough;
74 	default:
75 		req->error_loc = offsetof(struct nvme_common_command, opcode);
76 		return NVME_SC_INTERNAL | NVME_SC_DNR;
77 	}
78 }
79 
80 u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
81 {
82 	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
83 		 req->sq->qid);
84 
85 	req->error_loc = offsetof(struct nvme_common_command, opcode);
86 	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
87 }
88 
89 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
90 		const char *subsysnqn);
91 
92 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
93 		size_t len)
94 {
95 	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
96 		req->error_loc = offsetof(struct nvme_common_command, dptr);
97 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
98 	}
99 	return 0;
100 }
101 
102 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
103 {
104 	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
105 		req->error_loc = offsetof(struct nvme_common_command, dptr);
106 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
107 	}
108 	return 0;
109 }
110 
111 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
112 {
113 	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
114 		req->error_loc = offsetof(struct nvme_common_command, dptr);
115 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
116 	}
117 	return 0;
118 }
119 
120 static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
121 {
122 	struct nvmet_ns *cur;
123 	unsigned long idx;
124 	u32 nsid = 0;
125 
126 	xa_for_each(&subsys->namespaces, idx, cur)
127 		nsid = cur->nsid;
128 
129 	return nsid;
130 }
131 
132 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
133 {
134 	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
135 }
136 
137 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
138 {
139 	struct nvmet_req *req;
140 
141 	mutex_lock(&ctrl->lock);
142 	while (ctrl->nr_async_event_cmds) {
143 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
144 		mutex_unlock(&ctrl->lock);
145 		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
146 		mutex_lock(&ctrl->lock);
147 	}
148 	mutex_unlock(&ctrl->lock);
149 }
150 
151 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
152 {
153 	struct nvmet_async_event *aen;
154 	struct nvmet_req *req;
155 
156 	mutex_lock(&ctrl->lock);
157 	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
158 		aen = list_first_entry(&ctrl->async_events,
159 				       struct nvmet_async_event, entry);
160 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
161 		nvmet_set_result(req, nvmet_async_event_result(aen));
162 
163 		list_del(&aen->entry);
164 		kfree(aen);
165 
166 		mutex_unlock(&ctrl->lock);
167 		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
168 		nvmet_req_complete(req, 0);
169 		mutex_lock(&ctrl->lock);
170 	}
171 	mutex_unlock(&ctrl->lock);
172 }
173 
174 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
175 {
176 	struct nvmet_async_event *aen, *tmp;
177 
178 	mutex_lock(&ctrl->lock);
179 	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
180 		list_del(&aen->entry);
181 		kfree(aen);
182 	}
183 	mutex_unlock(&ctrl->lock);
184 }
185 
186 static void nvmet_async_event_work(struct work_struct *work)
187 {
188 	struct nvmet_ctrl *ctrl =
189 		container_of(work, struct nvmet_ctrl, async_event_work);
190 
191 	nvmet_async_events_process(ctrl);
192 }
193 
194 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
195 		u8 event_info, u8 log_page)
196 {
197 	struct nvmet_async_event *aen;
198 
199 	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
200 	if (!aen)
201 		return;
202 
203 	aen->event_type = event_type;
204 	aen->event_info = event_info;
205 	aen->log_page = log_page;
206 
207 	mutex_lock(&ctrl->lock);
208 	list_add_tail(&aen->entry, &ctrl->async_events);
209 	mutex_unlock(&ctrl->lock);
210 
211 	queue_work(nvmet_wq, &ctrl->async_event_work);
212 }
213 
214 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
215 {
216 	u32 i;
217 
218 	mutex_lock(&ctrl->lock);
219 	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
220 		goto out_unlock;
221 
222 	for (i = 0; i < ctrl->nr_changed_ns; i++) {
223 		if (ctrl->changed_ns_list[i] == nsid)
224 			goto out_unlock;
225 	}
226 
227 	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
228 		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
229 		ctrl->nr_changed_ns = U32_MAX;
230 		goto out_unlock;
231 	}
232 
233 	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
234 out_unlock:
235 	mutex_unlock(&ctrl->lock);
236 }
237 
238 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
239 {
240 	struct nvmet_ctrl *ctrl;
241 
242 	lockdep_assert_held(&subsys->lock);
243 
244 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
245 		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
246 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
247 			continue;
248 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
249 				NVME_AER_NOTICE_NS_CHANGED,
250 				NVME_LOG_CHANGED_NS);
251 	}
252 }
253 
254 void nvmet_send_ana_event(struct nvmet_subsys *subsys,
255 		struct nvmet_port *port)
256 {
257 	struct nvmet_ctrl *ctrl;
258 
259 	mutex_lock(&subsys->lock);
260 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
261 		if (port && ctrl->port != port)
262 			continue;
263 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
264 			continue;
265 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
266 				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
267 	}
268 	mutex_unlock(&subsys->lock);
269 }
270 
271 void nvmet_port_send_ana_event(struct nvmet_port *port)
272 {
273 	struct nvmet_subsys_link *p;
274 
275 	down_read(&nvmet_config_sem);
276 	list_for_each_entry(p, &port->subsystems, entry)
277 		nvmet_send_ana_event(p->subsys, port);
278 	up_read(&nvmet_config_sem);
279 }
280 
281 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
282 {
283 	int ret = 0;
284 
285 	down_write(&nvmet_config_sem);
286 	if (nvmet_transports[ops->type])
287 		ret = -EINVAL;
288 	else
289 		nvmet_transports[ops->type] = ops;
290 	up_write(&nvmet_config_sem);
291 
292 	return ret;
293 }
294 EXPORT_SYMBOL_GPL(nvmet_register_transport);
295 
296 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
297 {
298 	down_write(&nvmet_config_sem);
299 	nvmet_transports[ops->type] = NULL;
300 	up_write(&nvmet_config_sem);
301 }
302 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
303 
304 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
305 {
306 	struct nvmet_ctrl *ctrl;
307 
308 	mutex_lock(&subsys->lock);
309 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
310 		if (ctrl->port == port)
311 			ctrl->ops->delete_ctrl(ctrl);
312 	}
313 	mutex_unlock(&subsys->lock);
314 }
315 
316 int nvmet_enable_port(struct nvmet_port *port)
317 {
318 	const struct nvmet_fabrics_ops *ops;
319 	int ret;
320 
321 	lockdep_assert_held(&nvmet_config_sem);
322 
323 	ops = nvmet_transports[port->disc_addr.trtype];
324 	if (!ops) {
325 		up_write(&nvmet_config_sem);
326 		request_module("nvmet-transport-%d", port->disc_addr.trtype);
327 		down_write(&nvmet_config_sem);
328 		ops = nvmet_transports[port->disc_addr.trtype];
329 		if (!ops) {
330 			pr_err("transport type %d not supported\n",
331 				port->disc_addr.trtype);
332 			return -EINVAL;
333 		}
334 	}
335 
336 	if (!try_module_get(ops->owner))
337 		return -EINVAL;
338 
339 	/*
340 	 * If the user requested PI support and the transport isn't pi capable,
341 	 * don't enable the port.
342 	 */
343 	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
344 		pr_err("T10-PI is not supported by transport type %d\n",
345 		       port->disc_addr.trtype);
346 		ret = -EINVAL;
347 		goto out_put;
348 	}
349 
350 	ret = ops->add_port(port);
351 	if (ret)
352 		goto out_put;
353 
354 	/* If the transport didn't set inline_data_size, then disable it. */
355 	if (port->inline_data_size < 0)
356 		port->inline_data_size = 0;
357 
358 	port->enabled = true;
359 	port->tr_ops = ops;
360 	return 0;
361 
362 out_put:
363 	module_put(ops->owner);
364 	return ret;
365 }
366 
367 void nvmet_disable_port(struct nvmet_port *port)
368 {
369 	const struct nvmet_fabrics_ops *ops;
370 
371 	lockdep_assert_held(&nvmet_config_sem);
372 
373 	port->enabled = false;
374 	port->tr_ops = NULL;
375 
376 	ops = nvmet_transports[port->disc_addr.trtype];
377 	ops->remove_port(port);
378 	module_put(ops->owner);
379 }
380 
381 static void nvmet_keep_alive_timer(struct work_struct *work)
382 {
383 	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
384 			struct nvmet_ctrl, ka_work);
385 	bool reset_tbkas = ctrl->reset_tbkas;
386 
387 	ctrl->reset_tbkas = false;
388 	if (reset_tbkas) {
389 		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
390 			ctrl->cntlid);
391 		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
392 		return;
393 	}
394 
395 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
396 		ctrl->cntlid, ctrl->kato);
397 
398 	nvmet_ctrl_fatal_error(ctrl);
399 }
400 
401 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
402 {
403 	if (unlikely(ctrl->kato == 0))
404 		return;
405 
406 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
407 		ctrl->cntlid, ctrl->kato);
408 
409 	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
410 }
411 
412 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
413 {
414 	if (unlikely(ctrl->kato == 0))
415 		return;
416 
417 	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
418 
419 	cancel_delayed_work_sync(&ctrl->ka_work);
420 }
421 
422 u16 nvmet_req_find_ns(struct nvmet_req *req)
423 {
424 	u32 nsid = le32_to_cpu(req->cmd->common.nsid);
425 
426 	req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
427 	if (unlikely(!req->ns)) {
428 		req->error_loc = offsetof(struct nvme_common_command, nsid);
429 		return NVME_SC_INVALID_NS | NVME_SC_DNR;
430 	}
431 
432 	percpu_ref_get(&req->ns->ref);
433 	return NVME_SC_SUCCESS;
434 }
435 
436 static void nvmet_destroy_namespace(struct percpu_ref *ref)
437 {
438 	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
439 
440 	complete(&ns->disable_done);
441 }
442 
443 void nvmet_put_namespace(struct nvmet_ns *ns)
444 {
445 	percpu_ref_put(&ns->ref);
446 }
447 
448 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
449 {
450 	nvmet_bdev_ns_disable(ns);
451 	nvmet_file_ns_disable(ns);
452 }
453 
454 static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
455 {
456 	int ret;
457 	struct pci_dev *p2p_dev;
458 
459 	if (!ns->use_p2pmem)
460 		return 0;
461 
462 	if (!ns->bdev) {
463 		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
464 		return -EINVAL;
465 	}
466 
467 	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
468 		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
469 		       ns->device_path);
470 		return -EINVAL;
471 	}
472 
473 	if (ns->p2p_dev) {
474 		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
475 		if (ret < 0)
476 			return -EINVAL;
477 	} else {
478 		/*
479 		 * Right now we just check that there is p2pmem available so
480 		 * we can report an error to the user right away if there
481 		 * is not. We'll find the actual device to use once we
482 		 * setup the controller when the port's device is available.
483 		 */
484 
485 		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
486 		if (!p2p_dev) {
487 			pr_err("no peer-to-peer memory is available for %s\n",
488 			       ns->device_path);
489 			return -EINVAL;
490 		}
491 
492 		pci_dev_put(p2p_dev);
493 	}
494 
495 	return 0;
496 }
497 
498 /*
499  * Note: ctrl->subsys->lock should be held when calling this function
500  */
501 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
502 				    struct nvmet_ns *ns)
503 {
504 	struct device *clients[2];
505 	struct pci_dev *p2p_dev;
506 	int ret;
507 
508 	if (!ctrl->p2p_client || !ns->use_p2pmem)
509 		return;
510 
511 	if (ns->p2p_dev) {
512 		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
513 		if (ret < 0)
514 			return;
515 
516 		p2p_dev = pci_dev_get(ns->p2p_dev);
517 	} else {
518 		clients[0] = ctrl->p2p_client;
519 		clients[1] = nvmet_ns_dev(ns);
520 
521 		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
522 		if (!p2p_dev) {
523 			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
524 			       dev_name(ctrl->p2p_client), ns->device_path);
525 			return;
526 		}
527 	}
528 
529 	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
530 	if (ret < 0)
531 		pci_dev_put(p2p_dev);
532 
533 	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
534 		ns->nsid);
535 }
536 
537 bool nvmet_ns_revalidate(struct nvmet_ns *ns)
538 {
539 	loff_t oldsize = ns->size;
540 
541 	if (ns->bdev)
542 		nvmet_bdev_ns_revalidate(ns);
543 	else
544 		nvmet_file_ns_revalidate(ns);
545 
546 	return oldsize != ns->size;
547 }
548 
549 int nvmet_ns_enable(struct nvmet_ns *ns)
550 {
551 	struct nvmet_subsys *subsys = ns->subsys;
552 	struct nvmet_ctrl *ctrl;
553 	int ret;
554 
555 	mutex_lock(&subsys->lock);
556 	ret = 0;
557 
558 	if (nvmet_is_passthru_subsys(subsys)) {
559 		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
560 		goto out_unlock;
561 	}
562 
563 	if (ns->enabled)
564 		goto out_unlock;
565 
566 	ret = -EMFILE;
567 	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
568 		goto out_unlock;
569 
570 	ret = nvmet_bdev_ns_enable(ns);
571 	if (ret == -ENOTBLK)
572 		ret = nvmet_file_ns_enable(ns);
573 	if (ret)
574 		goto out_unlock;
575 
576 	ret = nvmet_p2pmem_ns_enable(ns);
577 	if (ret)
578 		goto out_dev_disable;
579 
580 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
581 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
582 
583 	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
584 				0, GFP_KERNEL);
585 	if (ret)
586 		goto out_dev_put;
587 
588 	if (ns->nsid > subsys->max_nsid)
589 		subsys->max_nsid = ns->nsid;
590 
591 	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
592 	if (ret)
593 		goto out_restore_subsys_maxnsid;
594 
595 	subsys->nr_namespaces++;
596 
597 	nvmet_ns_changed(subsys, ns->nsid);
598 	ns->enabled = true;
599 	ret = 0;
600 out_unlock:
601 	mutex_unlock(&subsys->lock);
602 	return ret;
603 
604 out_restore_subsys_maxnsid:
605 	subsys->max_nsid = nvmet_max_nsid(subsys);
606 	percpu_ref_exit(&ns->ref);
607 out_dev_put:
608 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
609 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
610 out_dev_disable:
611 	nvmet_ns_dev_disable(ns);
612 	goto out_unlock;
613 }
614 
615 void nvmet_ns_disable(struct nvmet_ns *ns)
616 {
617 	struct nvmet_subsys *subsys = ns->subsys;
618 	struct nvmet_ctrl *ctrl;
619 
620 	mutex_lock(&subsys->lock);
621 	if (!ns->enabled)
622 		goto out_unlock;
623 
624 	ns->enabled = false;
625 	xa_erase(&ns->subsys->namespaces, ns->nsid);
626 	if (ns->nsid == subsys->max_nsid)
627 		subsys->max_nsid = nvmet_max_nsid(subsys);
628 
629 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
630 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
631 
632 	mutex_unlock(&subsys->lock);
633 
634 	/*
635 	 * Now that we removed the namespaces from the lookup list, we
636 	 * can kill the per_cpu ref and wait for any remaining references
637 	 * to be dropped, as well as a RCU grace period for anyone only
638 	 * using the namepace under rcu_read_lock().  Note that we can't
639 	 * use call_rcu here as we need to ensure the namespaces have
640 	 * been fully destroyed before unloading the module.
641 	 */
642 	percpu_ref_kill(&ns->ref);
643 	synchronize_rcu();
644 	wait_for_completion(&ns->disable_done);
645 	percpu_ref_exit(&ns->ref);
646 
647 	mutex_lock(&subsys->lock);
648 
649 	subsys->nr_namespaces--;
650 	nvmet_ns_changed(subsys, ns->nsid);
651 	nvmet_ns_dev_disable(ns);
652 out_unlock:
653 	mutex_unlock(&subsys->lock);
654 }
655 
656 void nvmet_ns_free(struct nvmet_ns *ns)
657 {
658 	nvmet_ns_disable(ns);
659 
660 	down_write(&nvmet_ana_sem);
661 	nvmet_ana_group_enabled[ns->anagrpid]--;
662 	up_write(&nvmet_ana_sem);
663 
664 	kfree(ns->device_path);
665 	kfree(ns);
666 }
667 
668 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
669 {
670 	struct nvmet_ns *ns;
671 
672 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
673 	if (!ns)
674 		return NULL;
675 
676 	init_completion(&ns->disable_done);
677 
678 	ns->nsid = nsid;
679 	ns->subsys = subsys;
680 
681 	down_write(&nvmet_ana_sem);
682 	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
683 	nvmet_ana_group_enabled[ns->anagrpid]++;
684 	up_write(&nvmet_ana_sem);
685 
686 	uuid_gen(&ns->uuid);
687 	ns->buffered_io = false;
688 	ns->csi = NVME_CSI_NVM;
689 
690 	return ns;
691 }
692 
693 static void nvmet_update_sq_head(struct nvmet_req *req)
694 {
695 	if (req->sq->size) {
696 		u32 old_sqhd, new_sqhd;
697 
698 		do {
699 			old_sqhd = req->sq->sqhd;
700 			new_sqhd = (old_sqhd + 1) % req->sq->size;
701 		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
702 					old_sqhd);
703 	}
704 	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
705 }
706 
707 static void nvmet_set_error(struct nvmet_req *req, u16 status)
708 {
709 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
710 	struct nvme_error_slot *new_error_slot;
711 	unsigned long flags;
712 
713 	req->cqe->status = cpu_to_le16(status << 1);
714 
715 	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
716 		return;
717 
718 	spin_lock_irqsave(&ctrl->error_lock, flags);
719 	ctrl->err_counter++;
720 	new_error_slot =
721 		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
722 
723 	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
724 	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
725 	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
726 	new_error_slot->status_field = cpu_to_le16(status << 1);
727 	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
728 	new_error_slot->lba = cpu_to_le64(req->error_slba);
729 	new_error_slot->nsid = req->cmd->common.nsid;
730 	spin_unlock_irqrestore(&ctrl->error_lock, flags);
731 
732 	/* set the more bit for this request */
733 	req->cqe->status |= cpu_to_le16(1 << 14);
734 }
735 
736 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
737 {
738 	struct nvmet_ns *ns = req->ns;
739 
740 	if (!req->sq->sqhd_disabled)
741 		nvmet_update_sq_head(req);
742 	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
743 	req->cqe->command_id = req->cmd->common.command_id;
744 
745 	if (unlikely(status))
746 		nvmet_set_error(req, status);
747 
748 	trace_nvmet_req_complete(req);
749 
750 	req->ops->queue_response(req);
751 	if (ns)
752 		nvmet_put_namespace(ns);
753 }
754 
755 void nvmet_req_complete(struct nvmet_req *req, u16 status)
756 {
757 	__nvmet_req_complete(req, status);
758 	percpu_ref_put(&req->sq->ref);
759 }
760 EXPORT_SYMBOL_GPL(nvmet_req_complete);
761 
762 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
763 		u16 qid, u16 size)
764 {
765 	cq->qid = qid;
766 	cq->size = size;
767 }
768 
769 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
770 		u16 qid, u16 size)
771 {
772 	sq->sqhd = 0;
773 	sq->qid = qid;
774 	sq->size = size;
775 
776 	ctrl->sqs[qid] = sq;
777 }
778 
779 static void nvmet_confirm_sq(struct percpu_ref *ref)
780 {
781 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
782 
783 	complete(&sq->confirm_done);
784 }
785 
786 void nvmet_sq_destroy(struct nvmet_sq *sq)
787 {
788 	struct nvmet_ctrl *ctrl = sq->ctrl;
789 
790 	/*
791 	 * If this is the admin queue, complete all AERs so that our
792 	 * queue doesn't have outstanding requests on it.
793 	 */
794 	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
795 		nvmet_async_events_failall(ctrl);
796 	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
797 	wait_for_completion(&sq->confirm_done);
798 	wait_for_completion(&sq->free_done);
799 	percpu_ref_exit(&sq->ref);
800 	nvmet_auth_sq_free(sq);
801 
802 	if (ctrl) {
803 		/*
804 		 * The teardown flow may take some time, and the host may not
805 		 * send us keep-alive during this period, hence reset the
806 		 * traffic based keep-alive timer so we don't trigger a
807 		 * controller teardown as a result of a keep-alive expiration.
808 		 */
809 		ctrl->reset_tbkas = true;
810 		sq->ctrl->sqs[sq->qid] = NULL;
811 		nvmet_ctrl_put(ctrl);
812 		sq->ctrl = NULL; /* allows reusing the queue later */
813 	}
814 }
815 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
816 
817 static void nvmet_sq_free(struct percpu_ref *ref)
818 {
819 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
820 
821 	complete(&sq->free_done);
822 }
823 
824 int nvmet_sq_init(struct nvmet_sq *sq)
825 {
826 	int ret;
827 
828 	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
829 	if (ret) {
830 		pr_err("percpu_ref init failed!\n");
831 		return ret;
832 	}
833 	init_completion(&sq->free_done);
834 	init_completion(&sq->confirm_done);
835 
836 	return 0;
837 }
838 EXPORT_SYMBOL_GPL(nvmet_sq_init);
839 
840 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
841 		struct nvmet_ns *ns)
842 {
843 	enum nvme_ana_state state = port->ana_state[ns->anagrpid];
844 
845 	if (unlikely(state == NVME_ANA_INACCESSIBLE))
846 		return NVME_SC_ANA_INACCESSIBLE;
847 	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
848 		return NVME_SC_ANA_PERSISTENT_LOSS;
849 	if (unlikely(state == NVME_ANA_CHANGE))
850 		return NVME_SC_ANA_TRANSITION;
851 	return 0;
852 }
853 
854 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
855 {
856 	if (unlikely(req->ns->readonly)) {
857 		switch (req->cmd->common.opcode) {
858 		case nvme_cmd_read:
859 		case nvme_cmd_flush:
860 			break;
861 		default:
862 			return NVME_SC_NS_WRITE_PROTECTED;
863 		}
864 	}
865 
866 	return 0;
867 }
868 
869 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
870 {
871 	struct nvme_command *cmd = req->cmd;
872 	u16 ret;
873 
874 	if (nvme_is_fabrics(cmd))
875 		return nvmet_parse_fabrics_io_cmd(req);
876 
877 	if (unlikely(!nvmet_check_auth_status(req)))
878 		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
879 
880 	ret = nvmet_check_ctrl_status(req);
881 	if (unlikely(ret))
882 		return ret;
883 
884 	if (nvmet_is_passthru_req(req))
885 		return nvmet_parse_passthru_io_cmd(req);
886 
887 	ret = nvmet_req_find_ns(req);
888 	if (unlikely(ret))
889 		return ret;
890 
891 	ret = nvmet_check_ana_state(req->port, req->ns);
892 	if (unlikely(ret)) {
893 		req->error_loc = offsetof(struct nvme_common_command, nsid);
894 		return ret;
895 	}
896 	ret = nvmet_io_cmd_check_access(req);
897 	if (unlikely(ret)) {
898 		req->error_loc = offsetof(struct nvme_common_command, nsid);
899 		return ret;
900 	}
901 
902 	switch (req->ns->csi) {
903 	case NVME_CSI_NVM:
904 		if (req->ns->file)
905 			return nvmet_file_parse_io_cmd(req);
906 		return nvmet_bdev_parse_io_cmd(req);
907 	case NVME_CSI_ZNS:
908 		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
909 			return nvmet_bdev_zns_parse_io_cmd(req);
910 		return NVME_SC_INVALID_IO_CMD_SET;
911 	default:
912 		return NVME_SC_INVALID_IO_CMD_SET;
913 	}
914 }
915 
916 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
917 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
918 {
919 	u8 flags = req->cmd->common.flags;
920 	u16 status;
921 
922 	req->cq = cq;
923 	req->sq = sq;
924 	req->ops = ops;
925 	req->sg = NULL;
926 	req->metadata_sg = NULL;
927 	req->sg_cnt = 0;
928 	req->metadata_sg_cnt = 0;
929 	req->transfer_len = 0;
930 	req->metadata_len = 0;
931 	req->cqe->status = 0;
932 	req->cqe->sq_head = 0;
933 	req->ns = NULL;
934 	req->error_loc = NVMET_NO_ERROR_LOC;
935 	req->error_slba = 0;
936 
937 	/* no support for fused commands yet */
938 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
939 		req->error_loc = offsetof(struct nvme_common_command, flags);
940 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
941 		goto fail;
942 	}
943 
944 	/*
945 	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
946 	 * contains an address of a single contiguous physical buffer that is
947 	 * byte aligned.
948 	 */
949 	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
950 		req->error_loc = offsetof(struct nvme_common_command, flags);
951 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
952 		goto fail;
953 	}
954 
955 	if (unlikely(!req->sq->ctrl))
956 		/* will return an error for any non-connect command: */
957 		status = nvmet_parse_connect_cmd(req);
958 	else if (likely(req->sq->qid != 0))
959 		status = nvmet_parse_io_cmd(req);
960 	else
961 		status = nvmet_parse_admin_cmd(req);
962 
963 	if (status)
964 		goto fail;
965 
966 	trace_nvmet_req_init(req, req->cmd);
967 
968 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
969 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
970 		goto fail;
971 	}
972 
973 	if (sq->ctrl)
974 		sq->ctrl->reset_tbkas = true;
975 
976 	return true;
977 
978 fail:
979 	__nvmet_req_complete(req, status);
980 	return false;
981 }
982 EXPORT_SYMBOL_GPL(nvmet_req_init);
983 
984 void nvmet_req_uninit(struct nvmet_req *req)
985 {
986 	percpu_ref_put(&req->sq->ref);
987 	if (req->ns)
988 		nvmet_put_namespace(req->ns);
989 }
990 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
991 
992 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
993 {
994 	if (unlikely(len != req->transfer_len)) {
995 		req->error_loc = offsetof(struct nvme_common_command, dptr);
996 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
997 		return false;
998 	}
999 
1000 	return true;
1001 }
1002 EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
1003 
1004 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
1005 {
1006 	if (unlikely(data_len > req->transfer_len)) {
1007 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1008 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
1009 		return false;
1010 	}
1011 
1012 	return true;
1013 }
1014 
1015 static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
1016 {
1017 	return req->transfer_len - req->metadata_len;
1018 }
1019 
1020 static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
1021 		struct nvmet_req *req)
1022 {
1023 	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
1024 			nvmet_data_transfer_len(req));
1025 	if (!req->sg)
1026 		goto out_err;
1027 
1028 	if (req->metadata_len) {
1029 		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
1030 				&req->metadata_sg_cnt, req->metadata_len);
1031 		if (!req->metadata_sg)
1032 			goto out_free_sg;
1033 	}
1034 
1035 	req->p2p_dev = p2p_dev;
1036 
1037 	return 0;
1038 out_free_sg:
1039 	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1040 out_err:
1041 	return -ENOMEM;
1042 }
1043 
1044 static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
1045 {
1046 	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
1047 	    !req->sq->ctrl || !req->sq->qid || !req->ns)
1048 		return NULL;
1049 	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
1050 }
1051 
1052 int nvmet_req_alloc_sgls(struct nvmet_req *req)
1053 {
1054 	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
1055 
1056 	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
1057 		return 0;
1058 
1059 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
1060 			    &req->sg_cnt);
1061 	if (unlikely(!req->sg))
1062 		goto out;
1063 
1064 	if (req->metadata_len) {
1065 		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
1066 					     &req->metadata_sg_cnt);
1067 		if (unlikely(!req->metadata_sg))
1068 			goto out_free;
1069 	}
1070 
1071 	return 0;
1072 out_free:
1073 	sgl_free(req->sg);
1074 out:
1075 	return -ENOMEM;
1076 }
1077 EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
1078 
1079 void nvmet_req_free_sgls(struct nvmet_req *req)
1080 {
1081 	if (req->p2p_dev) {
1082 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1083 		if (req->metadata_sg)
1084 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
1085 		req->p2p_dev = NULL;
1086 	} else {
1087 		sgl_free(req->sg);
1088 		if (req->metadata_sg)
1089 			sgl_free(req->metadata_sg);
1090 	}
1091 
1092 	req->sg = NULL;
1093 	req->metadata_sg = NULL;
1094 	req->sg_cnt = 0;
1095 	req->metadata_sg_cnt = 0;
1096 }
1097 EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
1098 
1099 static inline bool nvmet_cc_en(u32 cc)
1100 {
1101 	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
1102 }
1103 
1104 static inline u8 nvmet_cc_css(u32 cc)
1105 {
1106 	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
1107 }
1108 
1109 static inline u8 nvmet_cc_mps(u32 cc)
1110 {
1111 	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
1112 }
1113 
1114 static inline u8 nvmet_cc_ams(u32 cc)
1115 {
1116 	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
1117 }
1118 
1119 static inline u8 nvmet_cc_shn(u32 cc)
1120 {
1121 	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
1122 }
1123 
1124 static inline u8 nvmet_cc_iosqes(u32 cc)
1125 {
1126 	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
1127 }
1128 
1129 static inline u8 nvmet_cc_iocqes(u32 cc)
1130 {
1131 	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
1132 }
1133 
1134 static inline bool nvmet_css_supported(u8 cc_css)
1135 {
1136 	switch (cc_css << NVME_CC_CSS_SHIFT) {
1137 	case NVME_CC_CSS_NVM:
1138 	case NVME_CC_CSS_CSI:
1139 		return true;
1140 	default:
1141 		return false;
1142 	}
1143 }
1144 
1145 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1146 {
1147 	lockdep_assert_held(&ctrl->lock);
1148 
1149 	/*
1150 	 * Only I/O controllers should verify iosqes,iocqes.
1151 	 * Strictly speaking, the spec says a discovery controller
1152 	 * should verify iosqes,iocqes are zeroed, however that
1153 	 * would break backwards compatibility, so don't enforce it.
1154 	 */
1155 	if (!nvmet_is_disc_subsys(ctrl->subsys) &&
1156 	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1157 	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
1158 		ctrl->csts = NVME_CSTS_CFS;
1159 		return;
1160 	}
1161 
1162 	if (nvmet_cc_mps(ctrl->cc) != 0 ||
1163 	    nvmet_cc_ams(ctrl->cc) != 0 ||
1164 	    !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
1165 		ctrl->csts = NVME_CSTS_CFS;
1166 		return;
1167 	}
1168 
1169 	ctrl->csts = NVME_CSTS_RDY;
1170 
1171 	/*
1172 	 * Controllers that are not yet enabled should not really enforce the
1173 	 * keep alive timeout, but we still want to track a timeout and cleanup
1174 	 * in case a host died before it enabled the controller.  Hence, simply
1175 	 * reset the keep alive timer when the controller is enabled.
1176 	 */
1177 	if (ctrl->kato)
1178 		mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
1179 }
1180 
1181 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1182 {
1183 	lockdep_assert_held(&ctrl->lock);
1184 
1185 	/* XXX: tear down queues? */
1186 	ctrl->csts &= ~NVME_CSTS_RDY;
1187 	ctrl->cc = 0;
1188 }
1189 
1190 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1191 {
1192 	u32 old;
1193 
1194 	mutex_lock(&ctrl->lock);
1195 	old = ctrl->cc;
1196 	ctrl->cc = new;
1197 
1198 	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1199 		nvmet_start_ctrl(ctrl);
1200 	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1201 		nvmet_clear_ctrl(ctrl);
1202 	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1203 		nvmet_clear_ctrl(ctrl);
1204 		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1205 	}
1206 	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1207 		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1208 	mutex_unlock(&ctrl->lock);
1209 }
1210 
1211 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1212 {
1213 	/* command sets supported: NVMe command set: */
1214 	ctrl->cap = (1ULL << 37);
1215 	/* Controller supports one or more I/O Command Sets */
1216 	ctrl->cap |= (1ULL << 43);
1217 	/* CC.EN timeout in 500msec units: */
1218 	ctrl->cap |= (15ULL << 24);
1219 	/* maximum queue entries supported: */
1220 	if (ctrl->ops->get_max_queue_size)
1221 		ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
1222 	else
1223 		ctrl->cap |= NVMET_QUEUE_SIZE - 1;
1224 
1225 	if (nvmet_is_passthru_subsys(ctrl->subsys))
1226 		nvmet_passthrough_override_cap(ctrl);
1227 }
1228 
1229 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
1230 				       const char *hostnqn, u16 cntlid,
1231 				       struct nvmet_req *req)
1232 {
1233 	struct nvmet_ctrl *ctrl = NULL;
1234 	struct nvmet_subsys *subsys;
1235 
1236 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1237 	if (!subsys) {
1238 		pr_warn("connect request for invalid subsystem %s!\n",
1239 			subsysnqn);
1240 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1241 		goto out;
1242 	}
1243 
1244 	mutex_lock(&subsys->lock);
1245 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1246 		if (ctrl->cntlid == cntlid) {
1247 			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1248 				pr_warn("hostnqn mismatch.\n");
1249 				continue;
1250 			}
1251 			if (!kref_get_unless_zero(&ctrl->ref))
1252 				continue;
1253 
1254 			/* ctrl found */
1255 			goto found;
1256 		}
1257 	}
1258 
1259 	ctrl = NULL; /* ctrl not found */
1260 	pr_warn("could not find controller %d for subsys %s / host %s\n",
1261 		cntlid, subsysnqn, hostnqn);
1262 	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1263 
1264 found:
1265 	mutex_unlock(&subsys->lock);
1266 	nvmet_subsys_put(subsys);
1267 out:
1268 	return ctrl;
1269 }
1270 
1271 u16 nvmet_check_ctrl_status(struct nvmet_req *req)
1272 {
1273 	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1274 		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
1275 		       req->cmd->common.opcode, req->sq->qid);
1276 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1277 	}
1278 
1279 	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1280 		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
1281 		       req->cmd->common.opcode, req->sq->qid);
1282 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1283 	}
1284 
1285 	if (unlikely(!nvmet_check_auth_status(req))) {
1286 		pr_warn("qid %d not authenticated\n", req->sq->qid);
1287 		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
1288 	}
1289 	return 0;
1290 }
1291 
1292 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1293 {
1294 	struct nvmet_host_link *p;
1295 
1296 	lockdep_assert_held(&nvmet_config_sem);
1297 
1298 	if (subsys->allow_any_host)
1299 		return true;
1300 
1301 	if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
1302 		return true;
1303 
1304 	list_for_each_entry(p, &subsys->hosts, entry) {
1305 		if (!strcmp(nvmet_host_name(p->host), hostnqn))
1306 			return true;
1307 	}
1308 
1309 	return false;
1310 }
1311 
1312 /*
1313  * Note: ctrl->subsys->lock should be held when calling this function
1314  */
1315 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1316 		struct nvmet_req *req)
1317 {
1318 	struct nvmet_ns *ns;
1319 	unsigned long idx;
1320 
1321 	if (!req->p2p_client)
1322 		return;
1323 
1324 	ctrl->p2p_client = get_device(req->p2p_client);
1325 
1326 	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
1327 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
1328 }
1329 
1330 /*
1331  * Note: ctrl->subsys->lock should be held when calling this function
1332  */
1333 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1334 {
1335 	struct radix_tree_iter iter;
1336 	void __rcu **slot;
1337 
1338 	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1339 		pci_dev_put(radix_tree_deref_slot(slot));
1340 
1341 	put_device(ctrl->p2p_client);
1342 }
1343 
1344 static void nvmet_fatal_error_handler(struct work_struct *work)
1345 {
1346 	struct nvmet_ctrl *ctrl =
1347 			container_of(work, struct nvmet_ctrl, fatal_err_work);
1348 
1349 	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1350 	ctrl->ops->delete_ctrl(ctrl);
1351 }
1352 
1353 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1354 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1355 {
1356 	struct nvmet_subsys *subsys;
1357 	struct nvmet_ctrl *ctrl;
1358 	int ret;
1359 	u16 status;
1360 
1361 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1362 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1363 	if (!subsys) {
1364 		pr_warn("connect request for invalid subsystem %s!\n",
1365 			subsysnqn);
1366 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1367 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1368 		goto out;
1369 	}
1370 
1371 	down_read(&nvmet_config_sem);
1372 	if (!nvmet_host_allowed(subsys, hostnqn)) {
1373 		pr_info("connect by host %s for subsystem %s not allowed\n",
1374 			hostnqn, subsysnqn);
1375 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1376 		up_read(&nvmet_config_sem);
1377 		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
1378 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1379 		goto out_put_subsystem;
1380 	}
1381 	up_read(&nvmet_config_sem);
1382 
1383 	status = NVME_SC_INTERNAL;
1384 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1385 	if (!ctrl)
1386 		goto out_put_subsystem;
1387 	mutex_init(&ctrl->lock);
1388 
1389 	ctrl->port = req->port;
1390 	ctrl->ops = req->ops;
1391 
1392 #ifdef CONFIG_NVME_TARGET_PASSTHRU
1393 	/* By default, set loop targets to clear IDS by default */
1394 	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
1395 		subsys->clear_ids = 1;
1396 #endif
1397 
1398 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1399 	INIT_LIST_HEAD(&ctrl->async_events);
1400 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1401 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1402 	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
1403 
1404 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1405 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1406 
1407 	kref_init(&ctrl->ref);
1408 	ctrl->subsys = subsys;
1409 	nvmet_init_cap(ctrl);
1410 	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1411 
1412 	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1413 			sizeof(__le32), GFP_KERNEL);
1414 	if (!ctrl->changed_ns_list)
1415 		goto out_free_ctrl;
1416 
1417 	ctrl->sqs = kcalloc(subsys->max_qid + 1,
1418 			sizeof(struct nvmet_sq *),
1419 			GFP_KERNEL);
1420 	if (!ctrl->sqs)
1421 		goto out_free_changed_ns_list;
1422 
1423 	if (subsys->cntlid_min > subsys->cntlid_max)
1424 		goto out_free_sqs;
1425 
1426 	ret = ida_alloc_range(&cntlid_ida,
1427 			     subsys->cntlid_min, subsys->cntlid_max,
1428 			     GFP_KERNEL);
1429 	if (ret < 0) {
1430 		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
1431 		goto out_free_sqs;
1432 	}
1433 	ctrl->cntlid = ret;
1434 
1435 	/*
1436 	 * Discovery controllers may use some arbitrary high value
1437 	 * in order to cleanup stale discovery sessions
1438 	 */
1439 	if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
1440 		kato = NVMET_DISC_KATO_MS;
1441 
1442 	/* keep-alive timeout in seconds */
1443 	ctrl->kato = DIV_ROUND_UP(kato, 1000);
1444 
1445 	ctrl->err_counter = 0;
1446 	spin_lock_init(&ctrl->error_lock);
1447 
1448 	nvmet_start_keep_alive_timer(ctrl);
1449 
1450 	mutex_lock(&subsys->lock);
1451 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1452 	nvmet_setup_p2p_ns_map(ctrl, req);
1453 	mutex_unlock(&subsys->lock);
1454 
1455 	*ctrlp = ctrl;
1456 	return 0;
1457 
1458 out_free_sqs:
1459 	kfree(ctrl->sqs);
1460 out_free_changed_ns_list:
1461 	kfree(ctrl->changed_ns_list);
1462 out_free_ctrl:
1463 	kfree(ctrl);
1464 out_put_subsystem:
1465 	nvmet_subsys_put(subsys);
1466 out:
1467 	return status;
1468 }
1469 
1470 static void nvmet_ctrl_free(struct kref *ref)
1471 {
1472 	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1473 	struct nvmet_subsys *subsys = ctrl->subsys;
1474 
1475 	mutex_lock(&subsys->lock);
1476 	nvmet_release_p2p_ns_map(ctrl);
1477 	list_del(&ctrl->subsys_entry);
1478 	mutex_unlock(&subsys->lock);
1479 
1480 	nvmet_stop_keep_alive_timer(ctrl);
1481 
1482 	flush_work(&ctrl->async_event_work);
1483 	cancel_work_sync(&ctrl->fatal_err_work);
1484 
1485 	nvmet_destroy_auth(ctrl);
1486 
1487 	ida_free(&cntlid_ida, ctrl->cntlid);
1488 
1489 	nvmet_async_events_free(ctrl);
1490 	kfree(ctrl->sqs);
1491 	kfree(ctrl->changed_ns_list);
1492 	kfree(ctrl);
1493 
1494 	nvmet_subsys_put(subsys);
1495 }
1496 
1497 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1498 {
1499 	kref_put(&ctrl->ref, nvmet_ctrl_free);
1500 }
1501 
1502 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1503 {
1504 	mutex_lock(&ctrl->lock);
1505 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
1506 		ctrl->csts |= NVME_CSTS_CFS;
1507 		queue_work(nvmet_wq, &ctrl->fatal_err_work);
1508 	}
1509 	mutex_unlock(&ctrl->lock);
1510 }
1511 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1512 
1513 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1514 		const char *subsysnqn)
1515 {
1516 	struct nvmet_subsys_link *p;
1517 
1518 	if (!port)
1519 		return NULL;
1520 
1521 	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1522 		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1523 			return NULL;
1524 		return nvmet_disc_subsys;
1525 	}
1526 
1527 	down_read(&nvmet_config_sem);
1528 	list_for_each_entry(p, &port->subsystems, entry) {
1529 		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1530 				NVMF_NQN_SIZE)) {
1531 			if (!kref_get_unless_zero(&p->subsys->ref))
1532 				break;
1533 			up_read(&nvmet_config_sem);
1534 			return p->subsys;
1535 		}
1536 	}
1537 	up_read(&nvmet_config_sem);
1538 	return NULL;
1539 }
1540 
1541 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1542 		enum nvme_subsys_type type)
1543 {
1544 	struct nvmet_subsys *subsys;
1545 	char serial[NVMET_SN_MAX_SIZE / 2];
1546 	int ret;
1547 
1548 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1549 	if (!subsys)
1550 		return ERR_PTR(-ENOMEM);
1551 
1552 	subsys->ver = NVMET_DEFAULT_VS;
1553 	/* generate a random serial number as our controllers are ephemeral: */
1554 	get_random_bytes(&serial, sizeof(serial));
1555 	bin2hex(subsys->serial, &serial, sizeof(serial));
1556 
1557 	subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
1558 	if (!subsys->model_number) {
1559 		ret = -ENOMEM;
1560 		goto free_subsys;
1561 	}
1562 
1563 	switch (type) {
1564 	case NVME_NQN_NVME:
1565 		subsys->max_qid = NVMET_NR_QUEUES;
1566 		break;
1567 	case NVME_NQN_DISC:
1568 	case NVME_NQN_CURR:
1569 		subsys->max_qid = 0;
1570 		break;
1571 	default:
1572 		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1573 		ret = -EINVAL;
1574 		goto free_mn;
1575 	}
1576 	subsys->type = type;
1577 	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1578 			GFP_KERNEL);
1579 	if (!subsys->subsysnqn) {
1580 		ret = -ENOMEM;
1581 		goto free_mn;
1582 	}
1583 	subsys->cntlid_min = NVME_CNTLID_MIN;
1584 	subsys->cntlid_max = NVME_CNTLID_MAX;
1585 	kref_init(&subsys->ref);
1586 
1587 	mutex_init(&subsys->lock);
1588 	xa_init(&subsys->namespaces);
1589 	INIT_LIST_HEAD(&subsys->ctrls);
1590 	INIT_LIST_HEAD(&subsys->hosts);
1591 
1592 	return subsys;
1593 
1594 free_mn:
1595 	kfree(subsys->model_number);
1596 free_subsys:
1597 	kfree(subsys);
1598 	return ERR_PTR(ret);
1599 }
1600 
1601 static void nvmet_subsys_free(struct kref *ref)
1602 {
1603 	struct nvmet_subsys *subsys =
1604 		container_of(ref, struct nvmet_subsys, ref);
1605 
1606 	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
1607 
1608 	xa_destroy(&subsys->namespaces);
1609 	nvmet_passthru_subsys_free(subsys);
1610 
1611 	kfree(subsys->subsysnqn);
1612 	kfree(subsys->model_number);
1613 	kfree(subsys);
1614 }
1615 
1616 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1617 {
1618 	struct nvmet_ctrl *ctrl;
1619 
1620 	mutex_lock(&subsys->lock);
1621 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1622 		ctrl->ops->delete_ctrl(ctrl);
1623 	mutex_unlock(&subsys->lock);
1624 }
1625 
1626 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1627 {
1628 	kref_put(&subsys->ref, nvmet_subsys_free);
1629 }
1630 
1631 static int __init nvmet_init(void)
1632 {
1633 	int error;
1634 
1635 	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1636 
1637 	zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
1638 	if (!zbd_wq)
1639 		return -ENOMEM;
1640 
1641 	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1642 			WQ_MEM_RECLAIM, 0);
1643 	if (!buffered_io_wq) {
1644 		error = -ENOMEM;
1645 		goto out_free_zbd_work_queue;
1646 	}
1647 
1648 	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
1649 	if (!nvmet_wq) {
1650 		error = -ENOMEM;
1651 		goto out_free_buffered_work_queue;
1652 	}
1653 
1654 	error = nvmet_init_discovery();
1655 	if (error)
1656 		goto out_free_nvmet_work_queue;
1657 
1658 	error = nvmet_init_configfs();
1659 	if (error)
1660 		goto out_exit_discovery;
1661 	return 0;
1662 
1663 out_exit_discovery:
1664 	nvmet_exit_discovery();
1665 out_free_nvmet_work_queue:
1666 	destroy_workqueue(nvmet_wq);
1667 out_free_buffered_work_queue:
1668 	destroy_workqueue(buffered_io_wq);
1669 out_free_zbd_work_queue:
1670 	destroy_workqueue(zbd_wq);
1671 	return error;
1672 }
1673 
1674 static void __exit nvmet_exit(void)
1675 {
1676 	nvmet_exit_configfs();
1677 	nvmet_exit_discovery();
1678 	ida_destroy(&cntlid_ida);
1679 	destroy_workqueue(nvmet_wq);
1680 	destroy_workqueue(buffered_io_wq);
1681 	destroy_workqueue(zbd_wq);
1682 
1683 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1684 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1685 }
1686 
1687 module_init(nvmet_init);
1688 module_exit(nvmet_exit);
1689 
1690 MODULE_LICENSE("GPL v2");
1691