xref: /openbmc/linux/drivers/nvme/target/configfs.c (revision 1083681e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Configfs interface for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/kstrtox.h>
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/stat.h>
12 #include <linux/ctype.h>
13 #include <linux/pci.h>
14 #include <linux/pci-p2pdma.h>
15 #ifdef CONFIG_NVME_TARGET_AUTH
16 #include <linux/nvme-auth.h>
17 #endif
18 #include <crypto/hash.h>
19 #include <crypto/kpp.h>
20 #include <linux/nospec.h>
21 
22 #include "nvmet.h"
23 
/* Forward declarations: used for ci_type identity checks in the link ops. */
static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

/* All configured ports; exported to the rest of the target via nvmet_ports. */
static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;
29 
/* Generic (constant, name) pair used to map attribute values to strings. */
struct nvmet_type_name_map {
	u8		type;
	const char	*name;
};

/* Transport types selectable through the addr_trtype attribute. */
static struct nvmet_type_name_map nvmet_transport[] = {
	{ NVMF_TRTYPE_RDMA,	"rdma" },
	{ NVMF_TRTYPE_FC,	"fc" },
	{ NVMF_TRTYPE_TCP,	"tcp" },
	{ NVMF_TRTYPE_LOOP,	"loop" },
};

/*
 * Address families for addr_adrfam.  Note the show/store loops below start
 * at index 1, so entry 0 ("pcie") is never matched or user-selectable.
 */
static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
	{ NVMF_ADDR_FAMILY_IB,		"ib" },
	{ NVMF_ADDR_FAMILY_FC,		"fc" },
	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
};
50 
51 static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
52 {
53 	if (p->enabled)
54 		pr_err("Disable port '%u' before changing attribute in %s\n",
55 		       le16_to_cpu(p->disc_addr.portid), caller);
56 	return p->enabled;
57 }
58 
59 /*
60  * nvmet_port Generic ConfigFS definitions.
61  * Used in any place in the ConfigFS tree that refers to an address.
62  */
/*
 * Show the port's address family as a symbolic name ("ipv4", "fc", ...);
 * prints an empty line when the stored value matches no table entry.
 */
static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
{
	u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
	int i;

	/* Start at 1: entry 0 ("pcie") is deliberately excluded. */
	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (nvmet_addr_family[i].type == adrfam)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_family[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

/*
 * Store a new address family by name.  Only allowed while the port is
 * disabled (-EACCES otherwise); unknown names yield -EINVAL.
 */
static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	/* Start at 1: entry 0 ("pcie") cannot be selected from user space. */
	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (sysfs_streq(page, nvmet_addr_family[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for adrfam\n", page);
	return -EINVAL;

found:
	port->disc_addr.adrfam = nvmet_addr_family[i].type;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_adrfam);
100 
/* Show the port ID in host byte order (stored little-endian on the wire). */
static ssize_t nvmet_addr_portid_show(struct config_item *item,
		char *page)
{
	__le16 portid = to_nvmet_port(item)->disc_addr.portid;

	return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
}

/*
 * Store a new port ID.  The value is parsed before the enabled check so
 * malformed input reports -EINVAL rather than -EACCES.
 */
static ssize_t nvmet_addr_portid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u16 portid = 0;

	if (kstrtou16(page, 0, &portid)) {
		pr_err("Invalid value '%s' for portid\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	/* Stored little-endian, as it appears in the discovery log page. */
	port->disc_addr.portid = cpu_to_le16(portid);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_portid);
128 
/* Show the port's transport address string. */
static ssize_t nvmet_addr_traddr_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
}

/*
 * Store a new transport address.  Only allowed while the port is disabled.
 * sscanf("%s") also strips the trailing newline configfs writes carry.
 */
static ssize_t nvmet_addr_traddr_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRADDR_SIZE) {
		pr_err("Invalid value '%s' for traddr\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	/*
	 * NOTE(review): "%s" is unbounded; with count == NVMF_TRADDR_SIZE
	 * and no trailing newline the terminating NUL could land one byte
	 * past traddr.  In practice configfs input is newline-terminated —
	 * confirm before relying on this.
	 */
	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_traddr);
156 
/* Secure-channel treq values selectable through the addr_treq attribute. */
static const struct nvmet_type_name_map nvmet_addr_treq[] = {
	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
	{ NVMF_TREQ_REQUIRED,		"required" },
	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
};
162 
/*
 * Show the secure-channel portion of treq as a string; other treq bits
 * are masked off before matching.  Prints an empty line on no match.
 */
static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{
	u8 treq = to_nvmet_port(item)->disc_addr.treq &
		NVME_TREQ_SECURE_CHANNEL_MASK;
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (treq == nvmet_addr_treq[i].type)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_treq[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

/*
 * Store a new secure-channel treq value by name, preserving the bits
 * outside NVME_TREQ_SECURE_CHANNEL_MASK.  Port must be disabled.
 */
static ssize_t nvmet_addr_treq_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	/* Keep the non-secure-channel bits; only the masked field changes. */
	u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (sysfs_streq(page, nvmet_addr_treq[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for treq\n", page);
	return -EINVAL;

found:
	treq |= nvmet_addr_treq[i].type;
	port->disc_addr.treq = treq;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_treq);
203 
/* Show the transport service ID string (e.g. an IP port number). */
static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
}

/*
 * Store a new transport service ID.  Only allowed while the port is
 * disabled; same unbounded-"%s" caveat as addr_traddr_store.
 */
static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRSVCID_SIZE) {
		pr_err("Invalid value '%s' for trsvcid\n", page);
		return -EINVAL;
	}
	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trsvcid);
230 
/* Show the configured inline data size for the port. */
static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
}

/*
 * Store a new inline data size.  Only allowed while the port is disabled.
 * Note: kstrtoint() writes directly into port->inline_data_size, so no
 * further range validation happens here.
 */
static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int ret;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;
	ret = kstrtoint(page, 0, &port->inline_data_size);
	if (ret) {
		pr_err("Invalid value '%s' for inline_data_size\n", page);
		return -EINVAL;
	}
	return count;
}

CONFIGFS_ATTR(nvmet_, param_inline_data_size);
256 
#ifdef CONFIG_BLK_DEV_INTEGRITY
/* Show whether protection information (PI) is enabled on this port. */
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
}

/* Store the PI enable flag; only allowed while the port is disabled. */
static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	port->pi_enable = val;
	return count;
}

CONFIGFS_ATTR(nvmet_, param_pi_enable);
#endif
284 
285 static ssize_t nvmet_addr_trtype_show(struct config_item *item,
286 		char *page)
287 {
288 	struct nvmet_port *port = to_nvmet_port(item);
289 	int i;
290 
291 	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
292 		if (port->disc_addr.trtype == nvmet_transport[i].type)
293 			return snprintf(page, PAGE_SIZE,
294 					"%s\n", nvmet_transport[i].name);
295 	}
296 
297 	return sprintf(page, "\n");
298 }
299 
/* Fill in the RDMA transport-specific address subtype (TSAS) defaults. */
static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
{
	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}
306 
/*
 * Store a new transport type by name.  Only allowed while the port is
 * disabled.  The TSAS union is cleared on every change, and RDMA gets
 * its defaults re-initialized.
 */
static ssize_t nvmet_addr_trtype_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
		if (sysfs_streq(page, nvmet_transport[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for trtype\n", page);
	return -EINVAL;

found:
	/* Wipe any stale transport-specific data from the previous type. */
	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
	port->disc_addr.trtype = nvmet_transport[i].type;
	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
		nvmet_port_init_tsas_rdma(port);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trtype);
333 
334 /*
335  * Namespace structures & file operation functions below
336  */
/* Show the backing device path for this namespace. */
static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
{
	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
}
341 
/*
 * Store a new backing device path.  Rejected with -EBUSY while the
 * namespace is enabled.  The input is truncated at the first newline and
 * must be non-empty; the old path is freed before the new one is duped.
 */
static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	/* Old path freed first; on allocation failure device_path is NULL. */
	kfree(ns->device_path);
	ret = -ENOMEM;
	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, device_path);
375 
#ifdef CONFIG_PCI_P2PDMA
/* Show the namespace's peer-to-peer memory configuration. */
static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
}

/*
 * Store the p2pmem setting ("auto"/bool/PCI device).  Rejected with
 * -EBUSY while the namespace is enabled.  Takes over the device
 * reference returned by pci_p2pdma_enable_store() and drops the old one.
 */
static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	/* Release the previously held device before storing the new one. */
	pci_dev_put(ns->p2p_dev);
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, p2pmem);
#endif /* CONFIG_PCI_P2PDMA */
417 
/* Show the namespace UUID in standard big-endian textual form. */
static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
}

/*
 * Store a new namespace UUID.  Rejected with -EBUSY while the namespace
 * is enabled; malformed UUID strings yield -EINVAL.
 */
static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
					  const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	if (uuid_parse(page, &ns->uuid))
		ret = -EINVAL;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_uuid);
445 
/* Show the 16-byte NGUID in standard big-endian textual form. */
static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
}

/*
 * Store a new NGUID.  Parses 16 hex byte pairs, each optionally followed
 * by a '-' or ':' separator, so both plain hex and UUID-like layouts are
 * accepted.  Rejected with -EBUSY while the namespace is enabled; parsing
 * into a local buffer means a partial parse never modifies ns->nguid.
 */
static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		/* Need two more hex digits within the written buffer. */
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		/* Optional byte separator, as in UUID formatting. */
		if (*p == '-' || *p == ':')
			p++;
	}

	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_nguid);
491 
/* Show the namespace's ANA group ID. */
static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
}

/*
 * Move the namespace to a new ANA group.  Valid IDs are 1..NVMET_MAX_ANAGRPS;
 * the group-enabled refcounts are updated under nvmet_ana_sem and an ANA
 * change event is sent to all controllers of the subsystem afterwards.
 */
static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	/* Clamp the already-validated index against speculative execution. */
	newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
525 
/* Show whether the namespace is currently enabled. */
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}

/*
 * Enable or disable the namespace.  Only nvmet_ns_enable() can fail;
 * disable is unconditional and always returns success.
 */
static ssize_t nvmet_ns_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool enable;
	int ret = 0;

	if (kstrtobool(page, &enable))
		return -EINVAL;

	if (enable)
		ret = nvmet_ns_enable(ns);
	else
		nvmet_ns_disable(ns);

	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, enable);
550 
/* Show the namespace's buffered-I/O flag. */
static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
}

/*
 * Store the buffered-I/O flag.  May only be changed while the namespace
 * is disabled, since the setting affects how the backend is opened.
 */
static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		pr_err("disable ns before setting buffered_io value.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}

	ns->buffered_io = val;
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, buffered_io);
578 
/*
 * Write-only trigger: writing a true value re-reads the backing device
 * size and, if it changed, emits a namespace-changed event.  Requires
 * the namespace to be enabled.
 */
static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	/* Only "1"/true triggers a revalidate; false input is an error. */
	if (!val)
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (!ns->enabled) {
		pr_err("enable ns before revalidate.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}
	if (nvmet_ns_revalidate(ns))
		nvmet_ns_changed(ns->subsys, ns->nsid);
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
604 
/* Attributes exposed in each namespace's configfs directory. */
static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
	&nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};
618 
/*
 * Return true if a configfs namespace directory for @nsid exists in
 * @subsys.  Looks the item up by its decimal name under the configfs
 * subsystem mutex; name[12] fits any u32 (10 digits + NUL).
 */
bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
{
	struct config_item *ns_item;
	char name[12];

	snprintf(name, sizeof(name), "%u", nsid);
	mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
	ns_item = config_group_find_item(&subsys->namespaces_group, name);
	mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
	return ns_item != NULL;
}
630 
/* configfs release callback: frees the namespace when its item goes away. */
static void nvmet_ns_release(struct config_item *item)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	nvmet_ns_free(ns);
}

static struct configfs_item_operations nvmet_ns_item_ops = {
	.release		= nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops		= &nvmet_ns_item_ops,
	.ct_attrs		= nvmet_ns_attrs,
	.ct_owner		= THIS_MODULE,
};
647 
648 static struct config_group *nvmet_ns_make(struct config_group *group,
649 		const char *name)
650 {
651 	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
652 	struct nvmet_ns *ns;
653 	int ret;
654 	u32 nsid;
655 
656 	ret = kstrtou32(name, 0, &nsid);
657 	if (ret)
658 		goto out;
659 
660 	ret = -EINVAL;
661 	if (nsid == 0 || nsid == NVME_NSID_ALL) {
662 		pr_err("invalid nsid %#x", nsid);
663 		goto out;
664 	}
665 
666 	ret = -ENOMEM;
667 	ns = nvmet_ns_alloc(subsys, nsid);
668 	if (!ns)
669 		goto out;
670 	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
671 
672 	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
673 
674 	return &ns->group;
675 out:
676 	return ERR_PTR(ret);
677 }
678 
679 static struct configfs_group_operations nvmet_namespaces_group_ops = {
680 	.make_group		= nvmet_ns_make,
681 };
682 
683 static const struct config_item_type nvmet_namespaces_type = {
684 	.ct_group_ops		= &nvmet_namespaces_group_ops,
685 	.ct_owner		= THIS_MODULE,
686 };
687 
#ifdef CONFIG_NVME_TARGET_PASSTHRU

/* Show the path of the underlying NVMe controller used for passthru. */
static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);

	return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
}

/*
 * Store a new passthru controller path.  Rejected with -EBUSY while
 * passthru is enabled.  Input is truncated at the first newline and must
 * be non-empty; the old path is freed before the new one is duplicated.
 */
static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);

	ret = -EBUSY;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(subsys->passthru_ctrl_path);
	ret = -ENOMEM;
	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);

	return count;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
CONFIGFS_ATTR(nvmet_passthru_, device_path);
730 
/* Show whether a passthru controller is currently attached. */
static ssize_t nvmet_passthru_enable_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);

	return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
}

/*
 * Enable or disable passthru mode.  Only enabling can fail; disable is
 * unconditional and always returns success.
 */
static ssize_t nvmet_passthru_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	bool enable;
	int ret = 0;

	if (kstrtobool(page, &enable))
		return -EINVAL;

	if (enable)
		ret = nvmet_passthru_ctrl_enable(subsys);
	else
		nvmet_passthru_ctrl_disable(subsys);

	return ret ? ret : count;
}
CONFIGFS_ATTR(nvmet_passthru_, enable);
757 
/* Show the admin-command timeout used for passthru requests. */
static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
}

/* Store the admin-command timeout; no range validation beyond uint parse. */
static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int timeout;

	if (kstrtouint(page, 0, &timeout))
		return -EINVAL;
	subsys->admin_timeout = timeout;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
776 
/* Show the I/O-command timeout used for passthru requests. */
static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
}

/* Store the I/O-command timeout; no range validation beyond uint parse. */
static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int timeout;

	if (kstrtouint(page, 0, &timeout))
		return -EINVAL;
	subsys->io_timeout = timeout;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
795 
/* Show the clear_ids flag for the passthru subsystem. */
static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
}

/* Store the clear_ids flag; any parseable unsigned value is accepted. */
static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int clear_ids;

	if (kstrtouint(page, 0, &clear_ids))
		return -EINVAL;
	subsys->clear_ids = clear_ids;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
814 
/* Attributes exposed in each subsystem's "passthru" directory. */
static struct configfs_attribute *nvmet_passthru_attrs[] = {
	&nvmet_passthru_attr_device_path,
	&nvmet_passthru_attr_enable,
	&nvmet_passthru_attr_admin_timeout,
	&nvmet_passthru_attr_io_timeout,
	&nvmet_passthru_attr_clear_ids,
	NULL,
};

static const struct config_item_type nvmet_passthru_type = {
	.ct_attrs		= nvmet_passthru_attrs,
	.ct_owner		= THIS_MODULE,
};

/* Add the default "passthru" group under a newly created subsystem. */
static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
	config_group_init_type_name(&subsys->passthru_group,
				    "passthru", &nvmet_passthru_type);
	configfs_add_default_group(&subsys->passthru_group,
				   &subsys->group);
}

#else /* CONFIG_NVME_TARGET_PASSTHRU */

/* Stub when passthru support is compiled out. */
static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
}

#endif /* CONFIG_NVME_TARGET_PASSTHRU */
844 
/*
 * Symlink a subsystem into a port's "subsystems" directory, exporting it
 * on that port.  Duplicate links yield -EEXIST; the first subsystem
 * linked also enables the port.  Serialized by nvmet_config_sem.
 */
static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	/* Only subsystem items may be linked here. */
	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir.!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	/* First subsystem on this port: bring the port up. */
	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	/* Propagate the change to discovery controllers. */
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
887 
/*
 * Remove a subsystem symlink from a port: tears down the subsystem's
 * controllers on that port, notifies discovery, and disables the port
 * when its last subsystem is removed.  Silently ignores unknown links.
 */
static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link		= nvmet_port_subsys_allow_link,
	.drop_link		= nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops		= &nvmet_port_subsys_item_ops,
	.ct_owner		= THIS_MODULE,
};
923 
/*
 * Symlink a host into a subsystem's "allowed_hosts" directory.  Rejected
 * while allow_any_host is set; duplicate host NQNs yield -EEXIST.
 * Serialized by nvmet_config_sem.
 */
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	/* Only host items may be linked here. */
	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	/* Duplicates are detected by host NQN, not by pointer identity. */
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
965 
/*
 * Remove a host symlink from a subsystem's allowed_hosts and notify
 * discovery controllers.  Silently ignores hosts that are not linked.
 */
static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link		= nvmet_allowed_hosts_allow_link,
	.drop_link		= nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops		= &nvmet_allowed_hosts_item_ops,
	.ct_owner		= THIS_MODULE,
};
998 
/* Show whether any host may connect to this subsystem. */
static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
		to_subsys(item)->allow_any_host);
}

/*
 * Store allow_any_host.  Mutually exclusive with an explicit allowed_hosts
 * list; a discovery change event is only sent when the value changes.
 */
static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (kstrtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (subsys->allow_any_host != allow_any_host) {
		subsys->allow_any_host = allow_any_host;
		nvmet_subsys_disc_changed(subsys, NULL);
	}

out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
1034 
/*
 * Show the subsystem's NVMe version as "major.minor" or
 * "major.minor.tertiary" when a tertiary component is set.
 */
static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
					      char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	if (NVME_TERTIARY(subsys->ver))
		return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
				NVME_MAJOR(subsys->ver),
				NVME_MINOR(subsys->ver),
				NVME_TERTIARY(subsys->ver));

	return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
			NVME_MAJOR(subsys->ver),
			NVME_MINOR(subsys->ver));
}
1050 
/*
 * Set the subsystem version from "major.minor[.tertiary]" input.
 * Caller must hold subsys->lock (and nvmet_config_sem, see wrapper).
 * Rejected once the subsystem has been discovered, and always rejected
 * for passthru subsystems, which inherit the real controller's version.
 */
static ssize_t
nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int major, minor, tertiary = 0;
	int ret;

	if (subsys->subsys_discovered) {
		if (NVME_TERTIARY(subsys->ver))
			pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver),
			       NVME_TERTIARY(subsys->ver));
		else
			pr_err("Can't set version number. %llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver));
		return -EINVAL;
	}

	/* passthru subsystems use the underlying controller's version */
	if (nvmet_is_passthru_subsys(subsys))
		return -EINVAL;

	/* Accept two or three dotted components; tertiary defaults to 0. */
	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
	if (ret != 2 && ret != 3)
		return -EINVAL;

	subsys->ver = NVME_VS(major, minor, tertiary);

	return count;
}

/* Locking wrapper around nvmet_subsys_attr_version_store_locked(). */
static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
					       const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);
1099 
/*
 * Printable-ASCII check per Section 1.5 of the NVMe 1.4 specification:
 * only characters in the range 0x20..0x7e are allowed.
 */
static bool nvmet_is_ascii(const char c)
{
	return !(c < 0x20 || c > 0x7e);
}
1105 
/* Show the serial number; fixed width, space-padded (see store below). */
static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
					     char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return snprintf(page, PAGE_SIZE, "%.*s\n",
			NVMET_SN_MAX_SIZE, subsys->serial);
}
1114 
/*
 * Set the serial number.  Caller must hold subsys->lock (see wrapper).
 * Rejected once the subsystem has been discovered.  Input is truncated
 * at the first newline, must be 1..NVMET_SN_MAX_SIZE printable-ASCII
 * characters, and is space-padded to the full field width.
 */
static ssize_t
nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos, len = strcspn(page, "\n");

	if (subsys->subsys_discovered) {
		pr_err("Can't set serial number. %s is already assigned\n",
		       subsys->serial);
		return -EINVAL;
	}

	if (!len || len > NVMET_SN_MAX_SIZE) {
		pr_err("Serial Number can not be empty or exceed %d Bytes\n",
		       NVMET_SN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos])) {
			pr_err("Serial Number must contain only ASCII strings\n");
			return -EINVAL;
		}
	}

	/* Pad with spaces to the fixed field width, per NVMe conventions. */
	memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');

	return count;
}

/* Locking wrapper around nvmet_subsys_attr_serial_store_locked(). */
static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
1160 
1161 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
1162 						 char *page)
1163 {
1164 	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
1165 }
1166 
1167 static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
1168 						  const char *page, size_t cnt)
1169 {
1170 	u16 cntlid_min;
1171 
1172 	if (sscanf(page, "%hu\n", &cntlid_min) != 1)
1173 		return -EINVAL;
1174 
1175 	if (cntlid_min == 0)
1176 		return -EINVAL;
1177 
1178 	down_write(&nvmet_config_sem);
1179 	if (cntlid_min >= to_subsys(item)->cntlid_max)
1180 		goto out_unlock;
1181 	to_subsys(item)->cntlid_min = cntlid_min;
1182 	up_write(&nvmet_config_sem);
1183 	return cnt;
1184 
1185 out_unlock:
1186 	up_write(&nvmet_config_sem);
1187 	return -EINVAL;
1188 }
1189 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
1190 
1191 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
1192 						 char *page)
1193 {
1194 	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
1195 }
1196 
1197 static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
1198 						  const char *page, size_t cnt)
1199 {
1200 	u16 cntlid_max;
1201 
1202 	if (sscanf(page, "%hu\n", &cntlid_max) != 1)
1203 		return -EINVAL;
1204 
1205 	if (cntlid_max == 0)
1206 		return -EINVAL;
1207 
1208 	down_write(&nvmet_config_sem);
1209 	if (cntlid_max <= to_subsys(item)->cntlid_min)
1210 		goto out_unlock;
1211 	to_subsys(item)->cntlid_max = cntlid_max;
1212 	up_write(&nvmet_config_sem);
1213 	return cnt;
1214 
1215 out_unlock:
1216 	up_write(&nvmet_config_sem);
1217 	return -EINVAL;
1218 }
1219 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
1220 
1221 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
1222 					    char *page)
1223 {
1224 	struct nvmet_subsys *subsys = to_subsys(item);
1225 
1226 	return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
1227 }
1228 
1229 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
1230 		const char *page, size_t count)
1231 {
1232 	int pos = 0, len;
1233 	char *val;
1234 
1235 	if (subsys->subsys_discovered) {
1236 		pr_err("Can't set model number. %s is already assigned\n",
1237 		       subsys->model_number);
1238 		return -EINVAL;
1239 	}
1240 
1241 	len = strcspn(page, "\n");
1242 	if (!len)
1243 		return -EINVAL;
1244 
1245 	if (len > NVMET_MN_MAX_SIZE) {
1246 		pr_err("Model number size can not exceed %d Bytes\n",
1247 		       NVMET_MN_MAX_SIZE);
1248 		return -EINVAL;
1249 	}
1250 
1251 	for (pos = 0; pos < len; pos++) {
1252 		if (!nvmet_is_ascii(page[pos]))
1253 			return -EINVAL;
1254 	}
1255 
1256 	val = kmemdup_nul(page, len, GFP_KERNEL);
1257 	if (!val)
1258 		return -ENOMEM;
1259 	kfree(subsys->model_number);
1260 	subsys->model_number = val;
1261 	return count;
1262 }
1263 
/*
 * Configfs ->store for the model number.  Takes nvmet_config_sem and
 * then subsys->lock before delegating to the _locked helper.
 */
static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
1279 
1280 static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
1281 					    char *page)
1282 {
1283 	struct nvmet_subsys *subsys = to_subsys(item);
1284 
1285 	return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
1286 }
1287 
1288 static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
1289 		const char *page, size_t count)
1290 {
1291 	uint32_t val = 0;
1292 	int ret;
1293 
1294 	if (subsys->subsys_discovered) {
1295 		pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
1296 		      subsys->ieee_oui);
1297 		return -EINVAL;
1298 	}
1299 
1300 	ret = kstrtou32(page, 0, &val);
1301 	if (ret < 0)
1302 		return ret;
1303 
1304 	if (val >= 0x1000000)
1305 		return -EINVAL;
1306 
1307 	subsys->ieee_oui = val;
1308 
1309 	return count;
1310 }
1311 
/*
 * Configfs ->store for the IEEE OUI.  Takes nvmet_config_sem and then
 * subsys->lock before delegating to the _locked helper.
 */
static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);
1327 
1328 static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
1329 					    char *page)
1330 {
1331 	struct nvmet_subsys *subsys = to_subsys(item);
1332 
1333 	return sysfs_emit(page, "%s\n", subsys->firmware_rev);
1334 }
1335 
1336 static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
1337 		const char *page, size_t count)
1338 {
1339 	int pos = 0, len;
1340 	char *val;
1341 
1342 	if (subsys->subsys_discovered) {
1343 		pr_err("Can't set firmware revision. %s is already assigned\n",
1344 		       subsys->firmware_rev);
1345 		return -EINVAL;
1346 	}
1347 
1348 	len = strcspn(page, "\n");
1349 	if (!len)
1350 		return -EINVAL;
1351 
1352 	if (len > NVMET_FR_MAX_SIZE) {
1353 		pr_err("Firmware revision size can not exceed %d Bytes\n",
1354 		       NVMET_FR_MAX_SIZE);
1355 		return -EINVAL;
1356 	}
1357 
1358 	for (pos = 0; pos < len; pos++) {
1359 		if (!nvmet_is_ascii(page[pos]))
1360 			return -EINVAL;
1361 	}
1362 
1363 	val = kmemdup_nul(page, len, GFP_KERNEL);
1364 	if (!val)
1365 		return -ENOMEM;
1366 
1367 	kfree(subsys->firmware_rev);
1368 
1369 	subsys->firmware_rev = val;
1370 
1371 	return count;
1372 }
1373 
/*
 * Configfs ->store for the firmware revision.  Takes nvmet_config_sem
 * and then subsys->lock before delegating to the _locked helper.
 */
static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);
1389 
1390 #ifdef CONFIG_BLK_DEV_INTEGRITY
1391 static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
1392 						char *page)
1393 {
1394 	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
1395 }
1396 
1397 static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
1398 						 const char *page, size_t count)
1399 {
1400 	struct nvmet_subsys *subsys = to_subsys(item);
1401 	bool pi_enable;
1402 
1403 	if (kstrtobool(page, &pi_enable))
1404 		return -EINVAL;
1405 
1406 	subsys->pi_support = pi_enable;
1407 	return count;
1408 }
1409 CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
1410 #endif
1411 
1412 static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
1413 					      char *page)
1414 {
1415 	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
1416 }
1417 
/*
 * Change the maximum I/O queue count for the subsystem.  All existing
 * controllers are deleted so that hosts reconnect and renegotiate
 * their queue count against the new limit.
 */
static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
					       const char *page, size_t cnt)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	struct nvmet_ctrl *ctrl;
	u16 qid_max;

	if (sscanf(page, "%hu\n", &qid_max) != 1)
		return -EINVAL;

	if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	subsys->max_qid = qid_max;

	/* Force reconnect */
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	up_write(&nvmet_config_sem);

	return cnt;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
1442 
/* Attribute files exposed in each subsystem's configfs directory. */
static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	&nvmet_subsys_attr_attr_cntlid_min,
	&nvmet_subsys_attr_attr_cntlid_max,
	&nvmet_subsys_attr_attr_model,
	&nvmet_subsys_attr_attr_qid_max,
	&nvmet_subsys_attr_attr_ieee_oui,
	&nvmet_subsys_attr_attr_firmware,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_subsys_attr_attr_pi_enable,
#endif
	NULL,
};
1458 
1459 /*
1460  * Subsystem structures & folder operation functions below
1461  */
/*
 * Final put of a subsystem config item: shut down all of its
 * controllers and drop the configfs reference on the subsystem.
 */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}
1469 
/* Item/attribute wiring for a single subsystem directory. */
static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release		= nvmet_subsys_release,
};

static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops		= &nvmet_subsys_item_ops,
	.ct_attrs		= nvmet_subsys_attrs,
	.ct_owner		= THIS_MODULE,
};
1479 
/*
 * mkdir under "subsystems": allocate a subsystem named by its NQN and
 * populate its default child groups (namespaces, allowed_hosts and,
 * when configured, passthru).  Creating the well-known discovery NQN
 * is rejected because that subsystem is managed by the core.
 */
static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	nvmet_add_passthru_group(subsys);

	return &subsys->group;
}
1509 
/* The top-level "subsystems" directory: mkdir creates a subsystem. */
static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group		= nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops		= &nvmet_subsystems_group_ops,
	.ct_owner		= THIS_MODULE,
};
1518 
1519 static ssize_t nvmet_referral_enable_show(struct config_item *item,
1520 		char *page)
1521 {
1522 	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
1523 }
1524 
1525 static ssize_t nvmet_referral_enable_store(struct config_item *item,
1526 		const char *page, size_t count)
1527 {
1528 	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1529 	struct nvmet_port *port = to_nvmet_port(item);
1530 	bool enable;
1531 
1532 	if (kstrtobool(page, &enable))
1533 		goto inval;
1534 
1535 	if (enable)
1536 		nvmet_referral_enable(parent, port);
1537 	else
1538 		nvmet_referral_disable(parent, port);
1539 
1540 	return count;
1541 inval:
1542 	pr_err("Invalid value '%s' for enable\n", page);
1543 	return -EINVAL;
1544 }
1545 
1546 CONFIGFS_ATTR(nvmet_referral_, enable);
1547 
1548 /*
1549  * Discovery Service subsystem definitions
1550  */
/* Address attributes exposed for each referral directory. */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};
1561 
/*
 * Called on rmdir of a referral (before ->release): stop advertising
 * the referral while the item is being torn down.
 */
static void nvmet_referral_notify(struct config_group *group,
		struct config_item *item)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(parent, port);
}
1570 
/* Final put of a referral item: free its port structure. */
static void nvmet_referral_release(struct config_item *item)
{
	kfree(to_nvmet_port(item));
}
1577 
/* Item/attribute wiring for a single referral directory. */
static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};
1587 
1588 static struct config_group *nvmet_referral_make(
1589 		struct config_group *group, const char *name)
1590 {
1591 	struct nvmet_port *port;
1592 
1593 	port = kzalloc(sizeof(*port), GFP_KERNEL);
1594 	if (!port)
1595 		return ERR_PTR(-ENOMEM);
1596 
1597 	INIT_LIST_HEAD(&port->entry);
1598 	config_group_init_type_name(&port->group, name, &nvmet_referral_type);
1599 
1600 	return &port->group;
1601 }
1602 
/* The per-port "referrals" directory: mkdir creates a referral. */
static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group		= nvmet_referral_make,
	.disconnect_notify	= nvmet_referral_notify,
};

static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};
1612 
/* Mapping between ANA state values and their configfs string names. */
static struct nvmet_type_name_map nvmet_ana_state[] = {
	{ NVME_ANA_OPTIMIZED,		"optimized" },
	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
	{ NVME_ANA_CHANGE,		"change" },
};
1620 
1621 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
1622 		char *page)
1623 {
1624 	struct nvmet_ana_group *grp = to_ana_group(item);
1625 	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
1626 	int i;
1627 
1628 	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1629 		if (state == nvmet_ana_state[i].type)
1630 			return sprintf(page, "%s\n", nvmet_ana_state[i].name);
1631 	}
1632 
1633 	return sprintf(page, "\n");
1634 }
1635 
/*
 * Set this group's ANA state on its port.  The state table update and
 * the change-count bump happen together under nvmet_ana_sem; connected
 * hosts are then notified via an ANA change AEN.
 */
static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state *ana_state = grp->port->ana_state;
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
		if (sysfs_streq(page, nvmet_ana_state[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for ana_state\n", page);
	return -EINVAL;

found:
	down_write(&nvmet_ana_sem);
	ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);
	nvmet_port_send_ana_event(grp->port);
	return count;
}

CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
1661 
/* Attribute files exposed in each ANA group directory. */
static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};
1666 
/*
 * Tear down an explicitly created ANA group: mark its state
 * inaccessible, drop the enabled count, notify hosts and free it.
 * The per-port default group is embedded in the port and must not be
 * freed here.
 */
static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}
1682 
/* Item/attribute wiring for a single ANA group directory. */
static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release		= nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops		= &nvmet_ana_group_item_ops,
	.ct_attrs		= nvmet_ana_group_attrs,
	.ct_owner		= THIS_MODULE,
};
1692 
1693 static struct config_group *nvmet_ana_groups_make_group(
1694 		struct config_group *group, const char *name)
1695 {
1696 	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
1697 	struct nvmet_ana_group *grp;
1698 	u32 grpid;
1699 	int ret;
1700 
1701 	ret = kstrtou32(name, 0, &grpid);
1702 	if (ret)
1703 		goto out;
1704 
1705 	ret = -EINVAL;
1706 	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
1707 		goto out;
1708 
1709 	ret = -ENOMEM;
1710 	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
1711 	if (!grp)
1712 		goto out;
1713 	grp->port = port;
1714 	grp->grpid = grpid;
1715 
1716 	down_write(&nvmet_ana_sem);
1717 	grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
1718 	nvmet_ana_group_enabled[grpid]++;
1719 	up_write(&nvmet_ana_sem);
1720 
1721 	nvmet_port_send_ana_event(grp->port);
1722 
1723 	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
1724 	return &grp->group;
1725 out:
1726 	return ERR_PTR(ret);
1727 }
1728 
/* The per-port "ana_groups" directory: mkdir creates an ANA group. */
static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group		= nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops		= &nvmet_ana_groups_group_ops,
	.ct_owner		= THIS_MODULE,
};
1737 
1738 /*
1739  * Ports definitions.
1740  */
/*
 * Final put of a port config item: wait for in-flight controller
 * teardown work, unlink the port from the global list and free it.
 */
static void nvmet_port_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	/* Let inflight controllers teardown complete */
	flush_workqueue(nvmet_wq);
	list_del(&port->global_entry);

	kfree(port->ana_state);
	kfree(port);
}
1752 
/* Attribute files exposed in each port directory. */
static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_param_inline_data_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_attr_param_pi_enable,
#endif
	NULL,
};
1765 
/* Item/attribute wiring for a single port directory. */
static struct configfs_item_operations nvmet_port_item_ops = {
	.release		= nvmet_port_release,
};

static const struct config_item_type nvmet_port_type = {
	.ct_attrs		= nvmet_port_attrs,
	.ct_item_ops		= &nvmet_port_item_ops,
	.ct_owner		= THIS_MODULE,
};
1775 
1776 static struct config_group *nvmet_ports_make(struct config_group *group,
1777 		const char *name)
1778 {
1779 	struct nvmet_port *port;
1780 	u16 portid;
1781 	u32 i;
1782 
1783 	if (kstrtou16(name, 0, &portid))
1784 		return ERR_PTR(-EINVAL);
1785 
1786 	port = kzalloc(sizeof(*port), GFP_KERNEL);
1787 	if (!port)
1788 		return ERR_PTR(-ENOMEM);
1789 
1790 	port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
1791 			sizeof(*port->ana_state), GFP_KERNEL);
1792 	if (!port->ana_state) {
1793 		kfree(port);
1794 		return ERR_PTR(-ENOMEM);
1795 	}
1796 
1797 	for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
1798 		if (i == NVMET_DEFAULT_ANA_GRPID)
1799 			port->ana_state[1] = NVME_ANA_OPTIMIZED;
1800 		else
1801 			port->ana_state[i] = NVME_ANA_INACCESSIBLE;
1802 	}
1803 
1804 	list_add(&port->global_entry, &nvmet_ports_list);
1805 
1806 	INIT_LIST_HEAD(&port->entry);
1807 	INIT_LIST_HEAD(&port->subsystems);
1808 	INIT_LIST_HEAD(&port->referrals);
1809 	port->inline_data_size = -1;	/* < 0 == let the transport choose */
1810 
1811 	port->disc_addr.portid = cpu_to_le16(portid);
1812 	port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
1813 	port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
1814 	config_group_init_type_name(&port->group, name, &nvmet_port_type);
1815 
1816 	config_group_init_type_name(&port->subsys_group,
1817 			"subsystems", &nvmet_port_subsys_type);
1818 	configfs_add_default_group(&port->subsys_group, &port->group);
1819 
1820 	config_group_init_type_name(&port->referrals_group,
1821 			"referrals", &nvmet_referrals_type);
1822 	configfs_add_default_group(&port->referrals_group, &port->group);
1823 
1824 	config_group_init_type_name(&port->ana_groups_group,
1825 			"ana_groups", &nvmet_ana_groups_type);
1826 	configfs_add_default_group(&port->ana_groups_group, &port->group);
1827 
1828 	port->ana_default_group.port = port;
1829 	port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
1830 	config_group_init_type_name(&port->ana_default_group.group,
1831 			__stringify(NVMET_DEFAULT_ANA_GRPID),
1832 			&nvmet_ana_group_type);
1833 	configfs_add_default_group(&port->ana_default_group.group,
1834 			&port->ana_groups_group);
1835 
1836 	return &port->group;
1837 }
1838 
/* The top-level "ports" directory: mkdir creates a port. */
static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};
1847 
1848 static struct config_group nvmet_subsystems_group;
1849 static struct config_group nvmet_ports_group;
1850 
1851 #ifdef CONFIG_NVME_TARGET_AUTH
1852 static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
1853 		char *page)
1854 {
1855 	u8 *dhchap_secret = to_host(item)->dhchap_secret;
1856 
1857 	if (!dhchap_secret)
1858 		return sprintf(page, "\n");
1859 	return sprintf(page, "%s\n", dhchap_secret);
1860 }
1861 
1862 static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
1863 		const char *page, size_t count)
1864 {
1865 	struct nvmet_host *host = to_host(item);
1866 	int ret;
1867 
1868 	ret = nvmet_auth_set_key(host, page, false);
1869 	/*
1870 	 * Re-authentication is a soft state, so keep the
1871 	 * current authentication valid until the host
1872 	 * requests re-authentication.
1873 	 */
1874 	return ret < 0 ? ret : count;
1875 }
1876 
1877 CONFIGFS_ATTR(nvmet_host_, dhchap_key);
1878 
1879 static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
1880 		char *page)
1881 {
1882 	u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
1883 
1884 	if (!dhchap_secret)
1885 		return sprintf(page, "\n");
1886 	return sprintf(page, "%s\n", dhchap_secret);
1887 }
1888 
1889 static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
1890 		const char *page, size_t count)
1891 {
1892 	struct nvmet_host *host = to_host(item);
1893 	int ret;
1894 
1895 	ret = nvmet_auth_set_key(host, page, true);
1896 	/*
1897 	 * Re-authentication is a soft state, so keep the
1898 	 * current authentication valid until the host
1899 	 * requests re-authentication.
1900 	 */
1901 	return ret < 0 ? ret : count;
1902 }
1903 
1904 CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
1905 
1906 static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
1907 		char *page)
1908 {
1909 	struct nvmet_host *host = to_host(item);
1910 	const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
1911 
1912 	return sprintf(page, "%s\n", hash_name ? hash_name : "none");
1913 }
1914 
/*
 * Set the HMAC hash used for DH-HMAC-CHAP; the name must be known to
 * the nvme-auth library and backed by an available shash algorithm.
 */
static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	u8 hmac_id;

	hmac_id = nvme_auth_hmac_id(page);
	if (hmac_id == NVME_AUTH_HASH_INVALID)
		return -EINVAL;
	if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
		return -ENOTSUPP; /* NOTE(review): kernel style prefers -EOPNOTSUPP; kept as-is */
	host->dhchap_hash_id = hmac_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
1931 
1932 static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
1933 		char *page)
1934 {
1935 	struct nvmet_host *host = to_host(item);
1936 	const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
1937 
1938 	return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
1939 }
1940 
/*
 * Set the DH group used for DH-HMAC-CHAP.  Non-NULL groups are only
 * accepted when the corresponding KPP algorithm is available.
 */
static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int dhgroup_id;

	dhgroup_id = nvme_auth_dhgroup_id(page);
	if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
		return -EINVAL;
	if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
		const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);

		if (!crypto_has_kpp(kpp, 0, 0))
			return -EINVAL;
	}
	host->dhchap_dhgroup_id = dhgroup_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
1961 
/* DH-CHAP authentication attributes exposed per host. */
static struct configfs_attribute *nvmet_host_attrs[] = {
	&nvmet_host_attr_dhchap_key,
	&nvmet_host_attr_dhchap_ctrl_key,
	&nvmet_host_attr_dhchap_hash,
	&nvmet_host_attr_dhchap_dhgroup,
	NULL,
};
1969 #endif /* CONFIG_NVME_TARGET_AUTH */
1970 
/*
 * Final put of a host config item: free the DH-CHAP secrets (when
 * auth support is built in) and the host itself.
 */
static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}
1981 
/* Item/attribute wiring for a single host directory. */
static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
#ifdef CONFIG_NVME_TARGET_AUTH
	.ct_attrs		= nvmet_host_attrs,
#endif
	.ct_owner		= THIS_MODULE,
};
1993 
/*
 * mkdir under "hosts": allocate a host entry (named by its NQN) that
 * can later be linked into a subsystem's allowed_hosts.
 */
static struct config_group *nvmet_hosts_make_group(struct config_group *group,
		const char *name)
{
	struct nvmet_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return ERR_PTR(-ENOMEM);

#ifdef CONFIG_NVME_TARGET_AUTH
	/* Default to SHA256 */
	host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
#endif

	config_group_init_type_name(&host->group, name, &nvmet_host_type);

	return &host->group;
}
2012 
/* The top-level "hosts" directory: mkdir creates a host entry. */
static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};
2021 
2022 static struct config_group nvmet_hosts_group;
2023 
/* Root of the nvmet configfs hierarchy (/sys/kernel/config/nvmet). */
static const struct config_item_type nvmet_root_type = {
	.ct_owner		= THIS_MODULE,
};

static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};
2036 
/*
 * Register the "nvmet" configfs subsystem with its three default
 * top-level directories: subsystems/, ports/ and hosts/.
 * Returns 0 on success or a negative errno from registration.
 */
int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}
2067 
/* Unregister the "nvmet" configfs hierarchy on module exit. */
void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}
2072