/* drivers/nvme/target/core.c (revision 7bcae826) */
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include "nvmet.h"

static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures, the write lock must be
 * held, while readers (populating the discovery log page or checking a
 * host-subsystem link) take the read lock to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

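/*
 * Pack an Asynchronous Event into completion queue entry Dword 0, as laid
 * out in the NVMe spec: bits 2:0 event type, bits 15:8 event information,
 * bits 23:16 log page identifier.  For example, a Notice event (type value
 * 2 per the spec) with event info 0 and log page 0 yields result 0x000002.
 */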
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

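/*
 * A fabrics transport registers its ops from module init.  A minimal
 * sketch (the handler names are illustrative, not part of this file;
 * every callback used below — ->add_port, ->remove_port,
 * ->queue_response, ->delete_ctrl — comes from struct nvmet_fabrics_ops
 * in nvmet.h):
 *
 *	static struct nvmet_fabrics_ops nvme_xxx_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_LOOP,
 *		.add_port	= nvme_xxx_add_port,
 *		.remove_port	= nvme_xxx_remove_port,
 *		.queue_response	= nvme_xxx_queue_response,
 *		.delete_ctrl	= nvme_xxx_delete_ctrl,
 *	};
 *
 *	err = nvmet_register_transport(&nvme_xxx_ops);
 */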
int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

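/*
 * Note that the config semaphore is dropped around request_module() below:
 * the freshly loaded transport module registers itself through
 * nvmet_register_transport(), which takes the same semaphore for writing,
 * so holding it across the module load would deadlock.  Autoloading relies
 * on transports declaring a matching alias, e.g.
 * MODULE_ALIAS("nvmet-transport-1") for an RDMA transport (assuming
 * NVMF_TRTYPE_RDMA == 1 per the NVMe over Fabrics spec).
 */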
int nvmet_enable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

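/*
 * Keep-alive handling: ctrl->kato is in seconds (the Connect command's
 * KATO field is in milliseconds and is rounded up in nvmet_alloc_ctrl()
 * below).  The Keep Alive command handler is expected to rearm ka_work;
 * if the host stops sending Keep Alive commands, the delayed work
 * eventually fires and tears the controller down via the fatal error path.
 */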
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

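/*
 * Look up a namespace and take a percpu reference on it.  The lookup runs
 * under rcu_read_lock(), so the namespace cannot be freed between finding
 * it and grabbing the reference; callers pair this with
 * nvmet_put_namespace() (done in __nvmet_req_complete() for I/O requests).
 */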
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled)
		goto out_unlock;

	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
			NULL);
	if (IS_ERR(ns->bdev)) {
		pr_err("failed to open block device %s: (%ld)\n",
			ns->device_path, PTR_ERR(ns->bdev));
		ret = PTR_ERR(ns->bdev);
		ns->bdev = NULL;
		goto out_unlock;
	}

	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_blkdev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

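		/*
		 * list_add_tail_rcu() on &old->dev_link inserts the new entry
		 * in front of the first namespace with a larger nsid, or at
		 * the tail of the list if the loop ran to completion.
		 */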
		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_blkdev_put:
	blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
	ns->bdev = NULL;
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespace from the lookup list, we
	 * can kill the percpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	if (ns->bdev)
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	return ns;
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (status)
		nvmet_set_status(req, status);

	/* XXX: need to fill in something useful for sq_head */
	req->rsp->sq_head = 0;
	if (likely(req->sq)) /* may be NULL during early failure */
		req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill(&sq->ref);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

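/*
 * Called by transport drivers once per received command capsule.  A rough
 * usage sketch (transport plumbing elided); the ->execute handler is set
 * by the nvmet_parse_*_cmd() helpers:
 *
 *	if (nvmet_req_init(req, cq, sq, &xxx_ops))
 *		req->execute(req);
 *
 * If nvmet_req_init() returns false, the request has already been
 * completed with an appropriate error status, so the caller must not
 * touch it again.
 */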
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->rsp->status = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/* either variant of SGLs is fine, as we don't support metadata */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-Connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

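/*
 * Field accessors for the Controller Configuration (CC) register, per the
 * NVMe spec layout: EN bit 0, CSS bits 6:4, MPS bits 10:7, AMS bits 13:11,
 * SHN bits 15:14, IOSQES bits 19:16, IOCQES bits 23:20.
 */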
static inline bool nvmet_cc_en(u32 cc)
{
	return cc & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> 4) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> 7) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> 11) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> 14) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> 16) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> 20) & 0xf;
}

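/*
 * Only one configuration is accepted: 64-byte SQEs (IOSQES == 6) and
 * 16-byte CQEs (IOCQES == 4), a 4KiB memory page size (MPS == 0, i.e.
 * 2^(12 + 0) bytes), round-robin arbitration (AMS == 0) and the NVM
 * command set (CSS == 0).  Anything else raises CSTS.CFS.
 */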
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

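/*
 * Controller Capabilities (CAP) register layout used here: MQES (bits
 * 15:0) is zero's based, hence the "- 1"; TO (bits 31:24) is in 500ms
 * units, so 15 advertises a 7.5 second worst-case enable/disable latency;
 * setting bit 37 in CSS (bits 44:37) advertises the NVM command set.
 */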
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVM command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}

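/*
 * Allocate a controller in response to a Fabrics Connect command:
 * validate the subsystem NQN and the host allow-list, size the per-queue
 * pointer arrays from the subsystem's max_qid, pick a controller ID from
 * the global IDA and start the keep-alive timer.
 */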
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			     NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_remove_ida;
		}

		/*
		 * Discovery controllers use an arbitrary high value in
		 * order to clean up stale discovery sessions.
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes).  If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_remove_ida:
	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	nvmet_stop_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
	nvmet_subsys_put(subsys);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);

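	/*
	 * The discovery log page header and each log page entry are both
	 * 1024 bytes in the NVMe over Fabrics spec; catch layout
	 * regressions at compile time.
	 */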
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");