xref: /openbmc/linux/drivers/nvme/target/configfs.c (revision ca3b4293)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Configfs interface for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/kstrtox.h>
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/stat.h>
12 #include <linux/ctype.h>
13 #include <linux/pci.h>
14 #include <linux/pci-p2pdma.h>
15 #ifdef CONFIG_NVME_TARGET_AUTH
16 #include <linux/nvme-auth.h>
17 #endif
18 #include <crypto/hash.h>
19 #include <crypto/kpp.h>
20 #include <linux/nospec.h>
21 
22 #include "nvmet.h"
23 
24 static const struct config_item_type nvmet_host_type;
25 static const struct config_item_type nvmet_subsys_type;
26 
27 static LIST_HEAD(nvmet_ports_list);
28 struct list_head *nvmet_ports = &nvmet_ports_list;
29 
30 struct nvmet_type_name_map {
31 	u8		type;
32 	const char	*name;
33 };
34 
35 static struct nvmet_type_name_map nvmet_transport[] = {
36 	{ NVMF_TRTYPE_RDMA,	"rdma" },
37 	{ NVMF_TRTYPE_FC,	"fc" },
38 	{ NVMF_TRTYPE_TCP,	"tcp" },
39 	{ NVMF_TRTYPE_LOOP,	"loop" },
40 };
41 
42 static const struct nvmet_type_name_map nvmet_addr_family[] = {
43 	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
44 	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
45 	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
46 	{ NVMF_ADDR_FAMILY_IB,		"ib" },
47 	{ NVMF_ADDR_FAMILY_FC,		"fc" },
48 	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
49 };
50 
51 static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
52 {
53 	if (p->enabled)
54 		pr_err("Disable port '%u' before changing attribute in %s\n",
55 		       le16_to_cpu(p->disc_addr.portid), caller);
56 	return p->enabled;
57 }
58 
59 /*
60  * nvmet_port Generic ConfigFS definitions.
61  * Used in any place in the ConfigFS tree that refers to an address.
62  */
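/*
 * Illustrative usage sketch: these attributes appear as files under
 * configfs.  Paths assume the default /sys/kernel/config mount point;
 * the port name "1" and the address values are arbitrary examples of
 * configuring a TCP port from userspace:
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1
 *   echo tcp      > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *   echo ipv4     > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *   echo 10.0.0.1 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *   echo 4420     > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 *
 * Each store below first checks nvmet_is_port_enabled(): address
 * attributes may only be changed while the port is disabled.
 */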
63 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
64 {
65 	u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
66 	int i;
67 
68 	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
69 		if (nvmet_addr_family[i].type == adrfam)
70 			return snprintf(page, PAGE_SIZE, "%s\n",
71 					nvmet_addr_family[i].name);
72 	}
73 
74 	return snprintf(page, PAGE_SIZE, "\n");
75 }
76 
77 static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
78 		const char *page, size_t count)
79 {
80 	struct nvmet_port *port = to_nvmet_port(item);
81 	int i;
82 
83 	if (nvmet_is_port_enabled(port, __func__))
84 		return -EACCES;
85 
86 	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
87 		if (sysfs_streq(page, nvmet_addr_family[i].name))
88 			goto found;
89 	}
90 
91 	pr_err("Invalid value '%s' for adrfam\n", page);
92 	return -EINVAL;
93 
94 found:
95 	port->disc_addr.adrfam = nvmet_addr_family[i].type;
96 	return count;
97 }
98 
99 CONFIGFS_ATTR(nvmet_, addr_adrfam);
100 
101 static ssize_t nvmet_addr_portid_show(struct config_item *item,
102 		char *page)
103 {
104 	__le16 portid = to_nvmet_port(item)->disc_addr.portid;
105 
106 	return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
107 }
108 
109 static ssize_t nvmet_addr_portid_store(struct config_item *item,
110 		const char *page, size_t count)
111 {
112 	struct nvmet_port *port = to_nvmet_port(item);
113 	u16 portid = 0;
114 
115 	if (kstrtou16(page, 0, &portid)) {
116 		pr_err("Invalid value '%s' for portid\n", page);
117 		return -EINVAL;
118 	}
119 
120 	if (nvmet_is_port_enabled(port, __func__))
121 		return -EACCES;
122 
123 	port->disc_addr.portid = cpu_to_le16(portid);
124 	return count;
125 }
126 
127 CONFIGFS_ATTR(nvmet_, addr_portid);
128 
129 static ssize_t nvmet_addr_traddr_show(struct config_item *item,
130 		char *page)
131 {
132 	struct nvmet_port *port = to_nvmet_port(item);
133 
134 	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
135 }
136 
137 static ssize_t nvmet_addr_traddr_store(struct config_item *item,
138 		const char *page, size_t count)
139 {
140 	struct nvmet_port *port = to_nvmet_port(item);
141 
142 	if (count > NVMF_TRADDR_SIZE) {
143 		pr_err("Invalid value '%s' for traddr\n", page);
144 		return -EINVAL;
145 	}
146 
147 	if (nvmet_is_port_enabled(port, __func__))
148 		return -EACCES;
149 
150 	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
151 		return -EINVAL;
152 	return count;
153 }
154 
155 CONFIGFS_ATTR(nvmet_, addr_traddr);
156 
157 static const struct nvmet_type_name_map nvmet_addr_treq[] = {
158 	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
159 	{ NVMF_TREQ_REQUIRED,		"required" },
160 	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
161 };
162 
163 static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
164 {
165 	u8 treq = to_nvmet_port(item)->disc_addr.treq &
166 		NVME_TREQ_SECURE_CHANNEL_MASK;
167 	int i;
168 
169 	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
170 		if (treq == nvmet_addr_treq[i].type)
171 			return snprintf(page, PAGE_SIZE, "%s\n",
172 					nvmet_addr_treq[i].name);
173 	}
174 
175 	return snprintf(page, PAGE_SIZE, "\n");
176 }
177 
178 static ssize_t nvmet_addr_treq_store(struct config_item *item,
179 		const char *page, size_t count)
180 {
181 	struct nvmet_port *port = to_nvmet_port(item);
182 	u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
183 	int i;
184 
185 	if (nvmet_is_port_enabled(port, __func__))
186 		return -EACCES;
187 
188 	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
189 		if (sysfs_streq(page, nvmet_addr_treq[i].name))
190 			goto found;
191 	}
192 
193 	pr_err("Invalid value '%s' for treq\n", page);
194 	return -EINVAL;
195 
196 found:
197 	treq |= nvmet_addr_treq[i].type;
198 	port->disc_addr.treq = treq;
199 	return count;
200 }
201 
202 CONFIGFS_ATTR(nvmet_, addr_treq);
203 
204 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
205 		char *page)
206 {
207 	struct nvmet_port *port = to_nvmet_port(item);
208 
209 	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
210 }
211 
212 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
213 		const char *page, size_t count)
214 {
215 	struct nvmet_port *port = to_nvmet_port(item);
216 
217 	if (count > NVMF_TRSVCID_SIZE) {
218 		pr_err("Invalid value '%s' for trsvcid\n", page);
219 		return -EINVAL;
220 	}
221 	if (nvmet_is_port_enabled(port, __func__))
222 		return -EACCES;
223 
224 	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
225 		return -EINVAL;
226 	return count;
227 }
228 
229 CONFIGFS_ATTR(nvmet_, addr_trsvcid);
230 
231 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
232 		char *page)
233 {
234 	struct nvmet_port *port = to_nvmet_port(item);
235 
236 	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
237 }
238 
239 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
240 		const char *page, size_t count)
241 {
242 	struct nvmet_port *port = to_nvmet_port(item);
243 	int ret;
244 
245 	if (nvmet_is_port_enabled(port, __func__))
246 		return -EACCES;
247 	ret = kstrtoint(page, 0, &port->inline_data_size);
248 	if (ret) {
249 		pr_err("Invalid value '%s' for inline_data_size\n", page);
250 		return -EINVAL;
251 	}
252 	return count;
253 }
254 
255 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
256 
257 #ifdef CONFIG_BLK_DEV_INTEGRITY
258 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
259 		char *page)
260 {
261 	struct nvmet_port *port = to_nvmet_port(item);
262 
263 	return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
264 }
265 
266 static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
267 		const char *page, size_t count)
268 {
269 	struct nvmet_port *port = to_nvmet_port(item);
270 	bool val;
271 
272 	if (kstrtobool(page, &val))
273 		return -EINVAL;
274 
275 	if (nvmet_is_port_enabled(port, __func__))
276 		return -EACCES;
277 
278 	port->pi_enable = val;
279 	return count;
280 }
281 
282 CONFIGFS_ATTR(nvmet_, param_pi_enable);
283 #endif
284 
285 static ssize_t nvmet_addr_trtype_show(struct config_item *item,
286 		char *page)
287 {
288 	struct nvmet_port *port = to_nvmet_port(item);
289 	int i;
290 
291 	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
292 		if (port->disc_addr.trtype == nvmet_transport[i].type)
293 			return snprintf(page, PAGE_SIZE,
294 					"%s\n", nvmet_transport[i].name);
295 	}
296 
297 	return sprintf(page, "\n");
298 }
299 
300 static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
301 {
302 	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
303 	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
304 	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
305 }
306 
307 static ssize_t nvmet_addr_trtype_store(struct config_item *item,
308 		const char *page, size_t count)
309 {
310 	struct nvmet_port *port = to_nvmet_port(item);
311 	int i;
312 
313 	if (nvmet_is_port_enabled(port, __func__))
314 		return -EACCES;
315 
316 	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
317 		if (sysfs_streq(page, nvmet_transport[i].name))
318 			goto found;
319 	}
320 
321 	pr_err("Invalid value '%s' for trtype\n", page);
322 	return -EINVAL;
323 
324 found:
325 	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
326 	port->disc_addr.trtype = nvmet_transport[i].type;
327 	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
328 		nvmet_port_init_tsas_rdma(port);
329 	return count;
330 }
331 
332 CONFIGFS_ATTR(nvmet_, addr_trtype);
333 
334 /*
335  * Namespace structures & file operation functions below
336  */
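/*
 * Illustrative usage sketch (the subsystem NQN "testnqn", the nsid "1"
 * and the device path are arbitrary examples; paths assume the default
 * configfs mount):
 *
 *   mkdir /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1
 *   echo /dev/nvme0n1 > \
 *       /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/device_path
 *   echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/enable
 *
 * As the stores below enforce, identity attributes (device_path, uuid,
 * nguid, ...) may only be changed while the namespace is disabled.
 */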
337 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
338 {
339 	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
340 }
341 
342 static ssize_t nvmet_ns_device_path_store(struct config_item *item,
343 		const char *page, size_t count)
344 {
345 	struct nvmet_ns *ns = to_nvmet_ns(item);
346 	struct nvmet_subsys *subsys = ns->subsys;
347 	size_t len;
348 	int ret;
349 
350 	mutex_lock(&subsys->lock);
351 	ret = -EBUSY;
352 	if (ns->enabled)
353 		goto out_unlock;
354 
355 	ret = -EINVAL;
356 	len = strcspn(page, "\n");
357 	if (!len)
358 		goto out_unlock;
359 
360 	kfree(ns->device_path);
361 	ret = -ENOMEM;
362 	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
363 	if (!ns->device_path)
364 		goto out_unlock;
365 
366 	mutex_unlock(&subsys->lock);
367 	return count;
368 
369 out_unlock:
370 	mutex_unlock(&subsys->lock);
371 	return ret;
372 }
373 
374 CONFIGFS_ATTR(nvmet_ns_, device_path);
375 
376 #ifdef CONFIG_PCI_P2PDMA
377 static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
378 {
379 	struct nvmet_ns *ns = to_nvmet_ns(item);
380 
381 	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
382 }
383 
384 static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
385 		const char *page, size_t count)
386 {
387 	struct nvmet_ns *ns = to_nvmet_ns(item);
388 	struct pci_dev *p2p_dev = NULL;
389 	bool use_p2pmem;
390 	int ret = count;
391 	int error;
392 
393 	mutex_lock(&ns->subsys->lock);
394 	if (ns->enabled) {
395 		ret = -EBUSY;
396 		goto out_unlock;
397 	}
398 
399 	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
400 	if (error) {
401 		ret = error;
402 		goto out_unlock;
403 	}
404 
405 	ns->use_p2pmem = use_p2pmem;
406 	pci_dev_put(ns->p2p_dev);
407 	ns->p2p_dev = p2p_dev;
408 
409 out_unlock:
410 	mutex_unlock(&ns->subsys->lock);
411 
412 	return ret;
413 }
414 
415 CONFIGFS_ATTR(nvmet_ns_, p2pmem);
416 #endif /* CONFIG_PCI_P2PDMA */
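/*
 * Illustrative note: per pci_p2pdma_enable_store(), the p2pmem
 * attribute above accepts a boolean or the full BDF name of a specific
 * PCI peer-to-peer memory provider, e.g. (example device name):
 *
 *   echo 0000:01:00.0 > .../namespaces/1/p2pmem
 */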
417 
418 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
419 {
420 	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
421 }
422 
423 static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
424 					  const char *page, size_t count)
425 {
426 	struct nvmet_ns *ns = to_nvmet_ns(item);
427 	struct nvmet_subsys *subsys = ns->subsys;
428 	int ret = 0;
429 
430 	mutex_lock(&subsys->lock);
431 	if (ns->enabled) {
432 		ret = -EBUSY;
433 		goto out_unlock;
434 	}
435 
436 	if (uuid_parse(page, &ns->uuid))
437 		ret = -EINVAL;
438 
439 out_unlock:
440 	mutex_unlock(&subsys->lock);
441 	return ret ? ret : count;
442 }
443 
444 CONFIGFS_ATTR(nvmet_ns_, device_uuid);
445 
446 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
447 {
448 	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
449 }
450 
451 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
452 		const char *page, size_t count)
453 {
454 	struct nvmet_ns *ns = to_nvmet_ns(item);
455 	struct nvmet_subsys *subsys = ns->subsys;
456 	u8 nguid[16];
457 	const char *p = page;
458 	int i;
459 	int ret = 0;
460 
461 	mutex_lock(&subsys->lock);
462 	if (ns->enabled) {
463 		ret = -EBUSY;
464 		goto out_unlock;
465 	}
466 
467 	for (i = 0; i < 16; i++) {
468 		if (p + 2 > page + count) {
469 			ret = -EINVAL;
470 			goto out_unlock;
471 		}
472 		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
473 			ret = -EINVAL;
474 			goto out_unlock;
475 		}
476 
477 		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
478 		p += 2;
479 
480 		if (*p == '-' || *p == ':')
481 			p++;
482 	}
483 
484 	memcpy(&ns->nguid, nguid, sizeof(nguid));
485 out_unlock:
486 	mutex_unlock(&subsys->lock);
487 	return ret ? ret : count;
488 }
489 
490 CONFIGFS_ATTR(nvmet_ns_, device_nguid);
491 
492 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
493 {
494 	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
495 }
496 
497 static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
498 		const char *page, size_t count)
499 {
500 	struct nvmet_ns *ns = to_nvmet_ns(item);
501 	u32 oldgrpid, newgrpid;
502 	int ret;
503 
504 	ret = kstrtou32(page, 0, &newgrpid);
505 	if (ret)
506 		return ret;
507 
508 	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
509 		return -EINVAL;
510 
511 	down_write(&nvmet_ana_sem);
512 	oldgrpid = ns->anagrpid;
513 	newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
514 	nvmet_ana_group_enabled[newgrpid]++;
515 	ns->anagrpid = newgrpid;
516 	nvmet_ana_group_enabled[oldgrpid]--;
517 	nvmet_ana_chgcnt++;
518 	up_write(&nvmet_ana_sem);
519 
520 	nvmet_send_ana_event(ns->subsys, NULL);
521 	return count;
522 }
523 
524 CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
525 
526 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
527 {
528 	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
529 }
530 
531 static ssize_t nvmet_ns_enable_store(struct config_item *item,
532 		const char *page, size_t count)
533 {
534 	struct nvmet_ns *ns = to_nvmet_ns(item);
535 	bool enable;
536 	int ret = 0;
537 
538 	if (kstrtobool(page, &enable))
539 		return -EINVAL;
540 
541 	/*
542 	 * Take the global nvmet_config_sem because the disable routine has a
543 	 * window where it releases the subsys lock, giving a parallel enable
544 	 * a chance to run concurrently and leave the disable with a
545 	 * miscounted ns percpu_ref.
546 	 */
547 	down_write(&nvmet_config_sem);
548 	if (enable)
549 		ret = nvmet_ns_enable(ns);
550 	else
551 		nvmet_ns_disable(ns);
552 	up_write(&nvmet_config_sem);
553 
554 	return ret ? ret : count;
555 }
556 
557 CONFIGFS_ATTR(nvmet_ns_, enable);
558 
559 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
560 {
561 	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
562 }
563 
564 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
565 		const char *page, size_t count)
566 {
567 	struct nvmet_ns *ns = to_nvmet_ns(item);
568 	bool val;
569 
570 	if (kstrtobool(page, &val))
571 		return -EINVAL;
572 
573 	mutex_lock(&ns->subsys->lock);
574 	if (ns->enabled) {
575 		pr_err("disable ns before setting buffered_io value.\n");
576 		mutex_unlock(&ns->subsys->lock);
577 		return -EINVAL;
578 	}
579 
580 	ns->buffered_io = val;
581 	mutex_unlock(&ns->subsys->lock);
582 	return count;
583 }
584 
585 CONFIGFS_ATTR(nvmet_ns_, buffered_io);
586 
587 static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
588 		const char *page, size_t count)
589 {
590 	struct nvmet_ns *ns = to_nvmet_ns(item);
591 	bool val;
592 
593 	if (kstrtobool(page, &val))
594 		return -EINVAL;
595 
596 	if (!val)
597 		return -EINVAL;
598 
599 	mutex_lock(&ns->subsys->lock);
600 	if (!ns->enabled) {
601 		pr_err("enable ns before revalidate.\n");
602 		mutex_unlock(&ns->subsys->lock);
603 		return -EINVAL;
604 	}
605 	if (nvmet_ns_revalidate(ns))
606 		nvmet_ns_changed(ns->subsys, ns->nsid);
607 	mutex_unlock(&ns->subsys->lock);
608 	return count;
609 }
610 
611 CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
612 
613 static struct configfs_attribute *nvmet_ns_attrs[] = {
614 	&nvmet_ns_attr_device_path,
615 	&nvmet_ns_attr_device_nguid,
616 	&nvmet_ns_attr_device_uuid,
617 	&nvmet_ns_attr_ana_grpid,
618 	&nvmet_ns_attr_enable,
619 	&nvmet_ns_attr_buffered_io,
620 	&nvmet_ns_attr_revalidate_size,
621 #ifdef CONFIG_PCI_P2PDMA
622 	&nvmet_ns_attr_p2pmem,
623 #endif
624 	NULL,
625 };
626 
627 bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
628 {
629 	struct config_item *ns_item;
630 	char name[12];
631 
632 	snprintf(name, sizeof(name), "%u", nsid);
633 	mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
634 	ns_item = config_group_find_item(&subsys->namespaces_group, name);
635 	mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
636 	return ns_item != NULL;
637 }
638 
639 static void nvmet_ns_release(struct config_item *item)
640 {
641 	struct nvmet_ns *ns = to_nvmet_ns(item);
642 
643 	nvmet_ns_free(ns);
644 }
645 
646 static struct configfs_item_operations nvmet_ns_item_ops = {
647 	.release		= nvmet_ns_release,
648 };
649 
650 static const struct config_item_type nvmet_ns_type = {
651 	.ct_item_ops		= &nvmet_ns_item_ops,
652 	.ct_attrs		= nvmet_ns_attrs,
653 	.ct_owner		= THIS_MODULE,
654 };
655 
656 static struct config_group *nvmet_ns_make(struct config_group *group,
657 		const char *name)
658 {
659 	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
660 	struct nvmet_ns *ns;
661 	int ret;
662 	u32 nsid;
663 
664 	ret = kstrtou32(name, 0, &nsid);
665 	if (ret)
666 		goto out;
667 
668 	ret = -EINVAL;
669 	if (nsid == 0 || nsid == NVME_NSID_ALL) {
670 		pr_err("invalid nsid %#x\n", nsid);
671 		goto out;
672 	}
673 
674 	ret = -ENOMEM;
675 	ns = nvmet_ns_alloc(subsys, nsid);
676 	if (!ns)
677 		goto out;
678 	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
679 
680 	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
681 
682 	return &ns->group;
683 out:
684 	return ERR_PTR(ret);
685 }
686 
687 static struct configfs_group_operations nvmet_namespaces_group_ops = {
688 	.make_group		= nvmet_ns_make,
689 };
690 
691 static const struct config_item_type nvmet_namespaces_type = {
692 	.ct_group_ops		= &nvmet_namespaces_group_ops,
693 	.ct_owner		= THIS_MODULE,
694 };
695 
696 #ifdef CONFIG_NVME_TARGET_PASSTHRU
697 
698 static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
699 		char *page)
700 {
701 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
702 
703 	return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
704 }
705 
706 static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
707 		const char *page, size_t count)
708 {
709 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
710 	size_t len;
711 	int ret;
712 
713 	mutex_lock(&subsys->lock);
714 
715 	ret = -EBUSY;
716 	if (subsys->passthru_ctrl)
717 		goto out_unlock;
718 
719 	ret = -EINVAL;
720 	len = strcspn(page, "\n");
721 	if (!len)
722 		goto out_unlock;
723 
724 	kfree(subsys->passthru_ctrl_path);
725 	ret = -ENOMEM;
726 	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
727 	if (!subsys->passthru_ctrl_path)
728 		goto out_unlock;
729 
730 	mutex_unlock(&subsys->lock);
731 
732 	return count;
733 out_unlock:
734 	mutex_unlock(&subsys->lock);
735 	return ret;
736 }
737 CONFIGFS_ATTR(nvmet_passthru_, device_path);
738 
739 static ssize_t nvmet_passthru_enable_show(struct config_item *item,
740 		char *page)
741 {
742 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
743 
744 	return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
745 }
746 
747 static ssize_t nvmet_passthru_enable_store(struct config_item *item,
748 		const char *page, size_t count)
749 {
750 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
751 	bool enable;
752 	int ret = 0;
753 
754 	if (kstrtobool(page, &enable))
755 		return -EINVAL;
756 
757 	if (enable)
758 		ret = nvmet_passthru_ctrl_enable(subsys);
759 	else
760 		nvmet_passthru_ctrl_disable(subsys);
761 
762 	return ret ? ret : count;
763 }
764 CONFIGFS_ATTR(nvmet_passthru_, enable);
765 
766 static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
767 		char *page)
768 {
769 	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
770 }
771 
772 static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
773 		const char *page, size_t count)
774 {
775 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
776 	unsigned int timeout;
777 
778 	if (kstrtouint(page, 0, &timeout))
779 		return -EINVAL;
780 	subsys->admin_timeout = timeout;
781 	return count;
782 }
783 CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
784 
785 static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
786 		char *page)
787 {
788 	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
789 }
790 
791 static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
792 		const char *page, size_t count)
793 {
794 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
795 	unsigned int timeout;
796 
797 	if (kstrtouint(page, 0, &timeout))
798 		return -EINVAL;
799 	subsys->io_timeout = timeout;
800 	return count;
801 }
802 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
803 
804 static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
805 		char *page)
806 {
807 	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
808 }
809 
810 static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
811 		const char *page, size_t count)
812 {
813 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
814 	unsigned int clear_ids;
815 
816 	if (kstrtouint(page, 0, &clear_ids))
817 		return -EINVAL;
818 	subsys->clear_ids = clear_ids;
819 	return count;
820 }
821 CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
822 
823 static struct configfs_attribute *nvmet_passthru_attrs[] = {
824 	&nvmet_passthru_attr_device_path,
825 	&nvmet_passthru_attr_enable,
826 	&nvmet_passthru_attr_admin_timeout,
827 	&nvmet_passthru_attr_io_timeout,
828 	&nvmet_passthru_attr_clear_ids,
829 	NULL,
830 };
831 
832 static const struct config_item_type nvmet_passthru_type = {
833 	.ct_attrs		= nvmet_passthru_attrs,
834 	.ct_owner		= THIS_MODULE,
835 };
836 
837 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
838 {
839 	config_group_init_type_name(&subsys->passthru_group,
840 				    "passthru", &nvmet_passthru_type);
841 	configfs_add_default_group(&subsys->passthru_group,
842 				   &subsys->group);
843 }
844 
845 #else /* CONFIG_NVME_TARGET_PASSTHRU */
846 
847 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
848 {
849 }
850 
851 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
852 
853 static int nvmet_port_subsys_allow_link(struct config_item *parent,
854 		struct config_item *target)
855 {
856 	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
857 	struct nvmet_subsys *subsys;
858 	struct nvmet_subsys_link *link, *p;
859 	int ret;
860 
861 	if (target->ci_type != &nvmet_subsys_type) {
862 		pr_err("can only link subsystems into the subsystems dir!\n");
863 		return -EINVAL;
864 	}
865 	subsys = to_subsys(target);
866 	link = kmalloc(sizeof(*link), GFP_KERNEL);
867 	if (!link)
868 		return -ENOMEM;
869 	link->subsys = subsys;
870 
871 	down_write(&nvmet_config_sem);
872 	ret = -EEXIST;
873 	list_for_each_entry(p, &port->subsystems, entry) {
874 		if (p->subsys == subsys)
875 			goto out_free_link;
876 	}
877 
878 	if (list_empty(&port->subsystems)) {
879 		ret = nvmet_enable_port(port);
880 		if (ret)
881 			goto out_free_link;
882 	}
883 
884 	list_add_tail(&link->entry, &port->subsystems);
885 	nvmet_port_disc_changed(port, subsys);
886 
887 	up_write(&nvmet_config_sem);
888 	return 0;
889 
890 out_free_link:
891 	up_write(&nvmet_config_sem);
892 	kfree(link);
893 	return ret;
894 }
895 
896 static void nvmet_port_subsys_drop_link(struct config_item *parent,
897 		struct config_item *target)
898 {
899 	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
900 	struct nvmet_subsys *subsys = to_subsys(target);
901 	struct nvmet_subsys_link *p;
902 
903 	down_write(&nvmet_config_sem);
904 	list_for_each_entry(p, &port->subsystems, entry) {
905 		if (p->subsys == subsys)
906 			goto found;
907 	}
908 	up_write(&nvmet_config_sem);
909 	return;
910 
911 found:
912 	list_del(&p->entry);
913 	nvmet_port_del_ctrls(port, subsys);
914 	nvmet_port_disc_changed(port, subsys);
915 
916 	if (list_empty(&port->subsystems))
917 		nvmet_disable_port(port);
918 	up_write(&nvmet_config_sem);
919 	kfree(p);
920 }
921 
922 static struct configfs_item_operations nvmet_port_subsys_item_ops = {
923 	.allow_link		= nvmet_port_subsys_allow_link,
924 	.drop_link		= nvmet_port_subsys_drop_link,
925 };
926 
927 static const struct config_item_type nvmet_port_subsys_type = {
928 	.ct_item_ops		= &nvmet_port_subsys_item_ops,
929 	.ct_owner		= THIS_MODULE,
930 };
931 
932 static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
933 		struct config_item *target)
934 {
935 	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
936 	struct nvmet_host *host;
937 	struct nvmet_host_link *link, *p;
938 	int ret;
939 
940 	if (target->ci_type != &nvmet_host_type) {
941 		pr_err("can only link hosts into the allowed_hosts directory!\n");
942 		return -EINVAL;
943 	}
944 
945 	host = to_host(target);
946 	link = kmalloc(sizeof(*link), GFP_KERNEL);
947 	if (!link)
948 		return -ENOMEM;
949 	link->host = host;
950 
951 	down_write(&nvmet_config_sem);
952 	ret = -EINVAL;
953 	if (subsys->allow_any_host) {
954 		pr_err("can't add hosts when allow_any_host is set!\n");
955 		goto out_free_link;
956 	}
957 
958 	ret = -EEXIST;
959 	list_for_each_entry(p, &subsys->hosts, entry) {
960 		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
961 			goto out_free_link;
962 	}
963 	list_add_tail(&link->entry, &subsys->hosts);
964 	nvmet_subsys_disc_changed(subsys, host);
965 
966 	up_write(&nvmet_config_sem);
967 	return 0;
968 out_free_link:
969 	up_write(&nvmet_config_sem);
970 	kfree(link);
971 	return ret;
972 }
973 
974 static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
975 		struct config_item *target)
976 {
977 	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
978 	struct nvmet_host *host = to_host(target);
979 	struct nvmet_host_link *p;
980 
981 	down_write(&nvmet_config_sem);
982 	list_for_each_entry(p, &subsys->hosts, entry) {
983 		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
984 			goto found;
985 	}
986 	up_write(&nvmet_config_sem);
987 	return;
988 
989 found:
990 	list_del(&p->entry);
991 	nvmet_subsys_disc_changed(subsys, host);
992 
993 	up_write(&nvmet_config_sem);
994 	kfree(p);
995 }
996 
997 static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
998 	.allow_link		= nvmet_allowed_hosts_allow_link,
999 	.drop_link		= nvmet_allowed_hosts_drop_link,
1000 };
1001 
1002 static const struct config_item_type nvmet_allowed_hosts_type = {
1003 	.ct_item_ops		= &nvmet_allowed_hosts_item_ops,
1004 	.ct_owner		= THIS_MODULE,
1005 };
1006 
1007 static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
1008 		char *page)
1009 {
1010 	return snprintf(page, PAGE_SIZE, "%d\n",
1011 		to_subsys(item)->allow_any_host);
1012 }
1013 
1014 static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
1015 		const char *page, size_t count)
1016 {
1017 	struct nvmet_subsys *subsys = to_subsys(item);
1018 	bool allow_any_host;
1019 	int ret = 0;
1020 
1021 	if (kstrtobool(page, &allow_any_host))
1022 		return -EINVAL;
1023 
1024 	down_write(&nvmet_config_sem);
1025 	if (allow_any_host && !list_empty(&subsys->hosts)) {
1026 		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
1027 		ret = -EINVAL;
1028 		goto out_unlock;
1029 	}
1030 
1031 	if (subsys->allow_any_host != allow_any_host) {
1032 		subsys->allow_any_host = allow_any_host;
1033 		nvmet_subsys_disc_changed(subsys, NULL);
1034 	}
1035 
1036 out_unlock:
1037 	up_write(&nvmet_config_sem);
1038 	return ret ? ret : count;
1039 }
1040 
1041 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
1042 
1043 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
1044 					      char *page)
1045 {
1046 	struct nvmet_subsys *subsys = to_subsys(item);
1047 
1048 	if (NVME_TERTIARY(subsys->ver))
1049 		return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
1050 				NVME_MAJOR(subsys->ver),
1051 				NVME_MINOR(subsys->ver),
1052 				NVME_TERTIARY(subsys->ver));
1053 
1054 	return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
1055 			NVME_MAJOR(subsys->ver),
1056 			NVME_MINOR(subsys->ver));
1057 }
1058 
1059 static ssize_t
1060 nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
1061 		const char *page, size_t count)
1062 {
1063 	int major, minor, tertiary = 0;
1064 	int ret;
1065 
1066 	if (subsys->subsys_discovered) {
1067 		if (NVME_TERTIARY(subsys->ver))
1068 			pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
1069 			       NVME_MAJOR(subsys->ver),
1070 			       NVME_MINOR(subsys->ver),
1071 			       NVME_TERTIARY(subsys->ver));
1072 		else
1073 			pr_err("Can't set version number. %llu.%llu is already assigned\n",
1074 			       NVME_MAJOR(subsys->ver),
1075 			       NVME_MINOR(subsys->ver));
1076 		return -EINVAL;
1077 	}
1078 
1079 	/* passthru subsystems use the underlying controller's version */
1080 	if (nvmet_is_passthru_subsys(subsys))
1081 		return -EINVAL;
1082 
1083 	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
1084 	if (ret != 2 && ret != 3)
1085 		return -EINVAL;
1086 
1087 	subsys->ver = NVME_VS(major, minor, tertiary);
1088 
1089 	return count;
1090 }
1091 
1092 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
1093 					       const char *page, size_t count)
1094 {
1095 	struct nvmet_subsys *subsys = to_subsys(item);
1096 	ssize_t ret;
1097 
1098 	down_write(&nvmet_config_sem);
1099 	mutex_lock(&subsys->lock);
1100 	ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
1101 	mutex_unlock(&subsys->lock);
1102 	up_write(&nvmet_config_sem);
1103 
1104 	return ret;
1105 }
1106 CONFIGFS_ATTR(nvmet_subsys_, attr_version);
1107 
1108 /* See Section 1.5 of NVMe 1.4 */
1109 static bool nvmet_is_ascii(const char c)
1110 {
1111 	return c >= 0x20 && c <= 0x7e;
1112 }
1113 
1114 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
1115 					     char *page)
1116 {
1117 	struct nvmet_subsys *subsys = to_subsys(item);
1118 
1119 	return snprintf(page, PAGE_SIZE, "%.*s\n",
1120 			NVMET_SN_MAX_SIZE, subsys->serial);
1121 }
1122 
1123 static ssize_t
1124 nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
1125 		const char *page, size_t count)
1126 {
1127 	int pos, len = strcspn(page, "\n");
1128 
1129 	if (subsys->subsys_discovered) {
1130 		pr_err("Can't set serial number. %s is already assigned\n",
1131 		       subsys->serial);
1132 		return -EINVAL;
1133 	}
1134 
1135 	if (!len || len > NVMET_SN_MAX_SIZE) {
1136 		pr_err("Serial Number can not be empty or exceed %d Bytes\n",
1137 		       NVMET_SN_MAX_SIZE);
1138 		return -EINVAL;
1139 	}
1140 
1141 	for (pos = 0; pos < len; pos++) {
1142 		if (!nvmet_is_ascii(page[pos])) {
1143 			pr_err("Serial Number must contain only ASCII characters\n");
1144 			return -EINVAL;
1145 		}
1146 	}
1147 
1148 	memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
1149 
1150 	return count;
1151 }
1152 
1153 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
1154 					      const char *page, size_t count)
1155 {
1156 	struct nvmet_subsys *subsys = to_subsys(item);
1157 	ssize_t ret;
1158 
1159 	down_write(&nvmet_config_sem);
1160 	mutex_lock(&subsys->lock);
1161 	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
1162 	mutex_unlock(&subsys->lock);
1163 	up_write(&nvmet_config_sem);
1164 
1165 	return ret;
1166 }
1167 CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
1168 
1169 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
1170 						 char *page)
1171 {
1172 	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
1173 }
1174 
1175 static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
1176 						  const char *page, size_t cnt)
1177 {
1178 	u16 cntlid_min;
1179 
1180 	if (sscanf(page, "%hu\n", &cntlid_min) != 1)
1181 		return -EINVAL;
1182 
1183 	if (cntlid_min == 0)
1184 		return -EINVAL;
1185 
1186 	down_write(&nvmet_config_sem);
1187 	if (cntlid_min >= to_subsys(item)->cntlid_max)
1188 		goto out_unlock;
1189 	to_subsys(item)->cntlid_min = cntlid_min;
1190 	up_write(&nvmet_config_sem);
1191 	return cnt;
1192 
1193 out_unlock:
1194 	up_write(&nvmet_config_sem);
1195 	return -EINVAL;
1196 }
1197 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
1198 
1199 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
1200 						 char *page)
1201 {
1202 	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
1203 }
1204 
1205 static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
1206 						  const char *page, size_t cnt)
1207 {
1208 	u16 cntlid_max;
1209 
1210 	if (sscanf(page, "%hu\n", &cntlid_max) != 1)
1211 		return -EINVAL;
1212 
1213 	if (cntlid_max == 0)
1214 		return -EINVAL;
1215 
1216 	down_write(&nvmet_config_sem);
1217 	if (cntlid_max <= to_subsys(item)->cntlid_min)
1218 		goto out_unlock;
1219 	to_subsys(item)->cntlid_max = cntlid_max;
1220 	up_write(&nvmet_config_sem);
1221 	return cnt;
1222 
1223 out_unlock:
1224 	up_write(&nvmet_config_sem);
1225 	return -EINVAL;
1226 }
1227 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
1228 
1229 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
1230 					    char *page)
1231 {
1232 	struct nvmet_subsys *subsys = to_subsys(item);
1233 
1234 	return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
1235 }
1236 
1237 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
1238 		const char *page, size_t count)
1239 {
1240 	int pos = 0, len;
1241 	char *val;
1242 
1243 	if (subsys->subsys_discovered) {
1244 		pr_err("Can't set model number. %s is already assigned\n",
1245 		       subsys->model_number);
1246 		return -EINVAL;
1247 	}
1248 
1249 	len = strcspn(page, "\n");
1250 	if (!len)
1251 		return -EINVAL;
1252 
1253 	if (len > NVMET_MN_MAX_SIZE) {
1254 		pr_err("Model number size can not exceed %d Bytes\n",
1255 		       NVMET_MN_MAX_SIZE);
1256 		return -EINVAL;
1257 	}
1258 
1259 	for (pos = 0; pos < len; pos++) {
1260 		if (!nvmet_is_ascii(page[pos]))
1261 			return -EINVAL;
1262 	}
1263 
1264 	val = kmemdup_nul(page, len, GFP_KERNEL);
1265 	if (!val)
1266 		return -ENOMEM;
1267 	kfree(subsys->model_number);
1268 	subsys->model_number = val;
1269 	return count;
1270 }
1271 
1272 static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
1273 					     const char *page, size_t count)
1274 {
1275 	struct nvmet_subsys *subsys = to_subsys(item);
1276 	ssize_t ret;
1277 
1278 	down_write(&nvmet_config_sem);
1279 	mutex_lock(&subsys->lock);
1280 	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
1281 	mutex_unlock(&subsys->lock);
1282 	up_write(&nvmet_config_sem);
1283 
1284 	return ret;
1285 }
1286 CONFIGFS_ATTR(nvmet_subsys_, attr_model);
1287 
1288 static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
1289 					    char *page)
1290 {
1291 	struct nvmet_subsys *subsys = to_subsys(item);
1292 
1293 	return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
1294 }
1295 
1296 static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
1297 		const char *page, size_t count)
1298 {
1299 	uint32_t val = 0;
1300 	int ret;
1301 
1302 	if (subsys->subsys_discovered) {
1303 		pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
1304 		      subsys->ieee_oui);
1305 		return -EINVAL;
1306 	}
1307 
1308 	ret = kstrtou32(page, 0, &val);
1309 	if (ret < 0)
1310 		return ret;
1311 
1312 	if (val >= 0x1000000)
1313 		return -EINVAL;
1314 
1315 	subsys->ieee_oui = val;
1316 
1317 	return count;
1318 }
1319 
1320 static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
1321 					     const char *page, size_t count)
1322 {
1323 	struct nvmet_subsys *subsys = to_subsys(item);
1324 	ssize_t ret;
1325 
1326 	down_write(&nvmet_config_sem);
1327 	mutex_lock(&subsys->lock);
1328 	ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
1329 	mutex_unlock(&subsys->lock);
1330 	up_write(&nvmet_config_sem);
1331 
1332 	return ret;
1333 }
1334 CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);
1335 
1336 static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
1337 					    char *page)
1338 {
1339 	struct nvmet_subsys *subsys = to_subsys(item);
1340 
1341 	return sysfs_emit(page, "%s\n", subsys->firmware_rev);
1342 }
1343 
1344 static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
1345 		const char *page, size_t count)
1346 {
1347 	int pos = 0, len;
1348 	char *val;
1349 
1350 	if (subsys->subsys_discovered) {
1351 		pr_err("Can't set firmware revision. %s is already assigned\n",
1352 		       subsys->firmware_rev);
1353 		return -EINVAL;
1354 	}
1355 
1356 	len = strcspn(page, "\n");
1357 	if (!len)
1358 		return -EINVAL;
1359 
1360 	if (len > NVMET_FR_MAX_SIZE) {
1361 		pr_err("Firmware revision size can not exceed %d Bytes\n",
1362 		       NVMET_FR_MAX_SIZE);
1363 		return -EINVAL;
1364 	}
1365 
1366 	for (pos = 0; pos < len; pos++) {
1367 		if (!nvmet_is_ascii(page[pos]))
1368 			return -EINVAL;
1369 	}
1370 
1371 	val = kmemdup_nul(page, len, GFP_KERNEL);
1372 	if (!val)
1373 		return -ENOMEM;
1374 
1375 	kfree(subsys->firmware_rev);
1376 
1377 	subsys->firmware_rev = val;
1378 
1379 	return count;
1380 }
1381 
1382 static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
1383 					     const char *page, size_t count)
1384 {
1385 	struct nvmet_subsys *subsys = to_subsys(item);
1386 	ssize_t ret;
1387 
1388 	down_write(&nvmet_config_sem);
1389 	mutex_lock(&subsys->lock);
1390 	ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
1391 	mutex_unlock(&subsys->lock);
1392 	up_write(&nvmet_config_sem);
1393 
1394 	return ret;
1395 }
1396 CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);
1397 
1398 #ifdef CONFIG_BLK_DEV_INTEGRITY
1399 static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
1400 						char *page)
1401 {
1402 	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
1403 }
1404 
1405 static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
1406 						 const char *page, size_t count)
1407 {
1408 	struct nvmet_subsys *subsys = to_subsys(item);
1409 	bool pi_enable;
1410 
1411 	if (kstrtobool(page, &pi_enable))
1412 		return -EINVAL;
1413 
1414 	subsys->pi_support = pi_enable;
1415 	return count;
1416 }
1417 CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
1418 #endif
1419 
1420 static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
1421 					      char *page)
1422 {
1423 	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
1424 }
1425 
1426 static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
1427 					       const char *page, size_t cnt)
1428 {
1429 	struct nvmet_subsys *subsys = to_subsys(item);
1430 	struct nvmet_ctrl *ctrl;
1431 	u16 qid_max;
1432 
1433 	if (sscanf(page, "%hu\n", &qid_max) != 1)
1434 		return -EINVAL;
1435 
1436 	if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
1437 		return -EINVAL;
1438 
1439 	down_write(&nvmet_config_sem);
1440 	subsys->max_qid = qid_max;
1441 
1442 	/* Force reconnect */
1443 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1444 		ctrl->ops->delete_ctrl(ctrl);
1445 	up_write(&nvmet_config_sem);
1446 
1447 	return cnt;
1448 }
1449 CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
1450 
1451 static struct configfs_attribute *nvmet_subsys_attrs[] = {
1452 	&nvmet_subsys_attr_attr_allow_any_host,
1453 	&nvmet_subsys_attr_attr_version,
1454 	&nvmet_subsys_attr_attr_serial,
1455 	&nvmet_subsys_attr_attr_cntlid_min,
1456 	&nvmet_subsys_attr_attr_cntlid_max,
1457 	&nvmet_subsys_attr_attr_model,
1458 	&nvmet_subsys_attr_attr_qid_max,
1459 	&nvmet_subsys_attr_attr_ieee_oui,
1460 	&nvmet_subsys_attr_attr_firmware,
1461 #ifdef CONFIG_BLK_DEV_INTEGRITY
1462 	&nvmet_subsys_attr_attr_pi_enable,
1463 #endif
1464 	NULL,
1465 };
1466 
1467 /*
1468  * Subsystem structures & folder operation functions below
1469  */
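/*
 * Illustrative usage sketch (the NQN "testnqn" is an arbitrary
 * example; paths assume the default configfs mount):
 *
 *   mkdir /sys/kernel/config/nvmet/subsystems/testnqn
 *   echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/attr_allow_any_host
 *
 * nvmet_subsys_make() below rejects NVME_DISC_SUBSYS_NAME, as the
 * discovery subsystem is created by the core rather than via configfs.
 */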
1470 static void nvmet_subsys_release(struct config_item *item)
1471 {
1472 	struct nvmet_subsys *subsys = to_subsys(item);
1473 
1474 	nvmet_subsys_del_ctrls(subsys);
1475 	nvmet_subsys_put(subsys);
1476 }
1477 
1478 static struct configfs_item_operations nvmet_subsys_item_ops = {
1479 	.release		= nvmet_subsys_release,
1480 };
1481 
1482 static const struct config_item_type nvmet_subsys_type = {
1483 	.ct_item_ops		= &nvmet_subsys_item_ops,
1484 	.ct_attrs		= nvmet_subsys_attrs,
1485 	.ct_owner		= THIS_MODULE,
1486 };
1487 
1488 static struct config_group *nvmet_subsys_make(struct config_group *group,
1489 		const char *name)
1490 {
1491 	struct nvmet_subsys *subsys;
1492 
1493 	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
1494 		pr_err("can't create discovery subsystem through configfs\n");
1495 		return ERR_PTR(-EINVAL);
1496 	}
1497 
1498 	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
1499 	if (IS_ERR(subsys))
1500 		return ERR_CAST(subsys);
1501 
1502 	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
1503 
1504 	config_group_init_type_name(&subsys->namespaces_group,
1505 			"namespaces", &nvmet_namespaces_type);
1506 	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
1507 
1508 	config_group_init_type_name(&subsys->allowed_hosts_group,
1509 			"allowed_hosts", &nvmet_allowed_hosts_type);
1510 	configfs_add_default_group(&subsys->allowed_hosts_group,
1511 			&subsys->group);
1512 
1513 	nvmet_add_passthru_group(subsys);
1514 
1515 	return &subsys->group;
1516 }
1517 
1518 static struct configfs_group_operations nvmet_subsystems_group_ops = {
1519 	.make_group		= nvmet_subsys_make,
1520 };
1521 
1522 static const struct config_item_type nvmet_subsystems_type = {
1523 	.ct_group_ops		= &nvmet_subsystems_group_ops,
1524 	.ct_owner		= THIS_MODULE,
1525 };
1526 
1527 static ssize_t nvmet_referral_enable_show(struct config_item *item,
1528 		char *page)
1529 {
1530 	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
1531 }
1532 
1533 static ssize_t nvmet_referral_enable_store(struct config_item *item,
1534 		const char *page, size_t count)
1535 {
1536 	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1537 	struct nvmet_port *port = to_nvmet_port(item);
1538 	bool enable;
1539 
1540 	if (kstrtobool(page, &enable))
1541 		goto inval;
1542 
1543 	if (enable)
1544 		nvmet_referral_enable(parent, port);
1545 	else
1546 		nvmet_referral_disable(parent, port);
1547 
1548 	return count;
1549 inval:
1550 	pr_err("Invalid value '%s' for enable\n", page);
1551 	return -EINVAL;
1552 }
1553 
1554 CONFIGFS_ATTR(nvmet_referral_, enable);
1555 
1556 /*
1557  * Discovery Service subsystem definitions
1558  */
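/*
 * Illustrative usage sketch for a referral (the port name "1", the
 * referral name "peer" and the address are arbitrary examples):
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1/referrals/peer
 *   echo tcp      > /sys/kernel/config/nvmet/ports/1/referrals/peer/addr_trtype
 *   echo 10.0.0.2 > /sys/kernel/config/nvmet/ports/1/referrals/peer/addr_traddr
 *   echo 1        > /sys/kernel/config/nvmet/ports/1/referrals/peer/enable
 */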
1559 static struct configfs_attribute *nvmet_referral_attrs[] = {
1560 	&nvmet_attr_addr_adrfam,
1561 	&nvmet_attr_addr_portid,
1562 	&nvmet_attr_addr_treq,
1563 	&nvmet_attr_addr_traddr,
1564 	&nvmet_attr_addr_trsvcid,
1565 	&nvmet_attr_addr_trtype,
1566 	&nvmet_referral_attr_enable,
1567 	NULL,
1568 };
1569 
1570 static void nvmet_referral_notify(struct config_group *group,
1571 		struct config_item *item)
1572 {
1573 	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1574 	struct nvmet_port *port = to_nvmet_port(item);
1575 
1576 	nvmet_referral_disable(parent, port);
1577 }
1578 
1579 static void nvmet_referral_release(struct config_item *item)
1580 {
1581 	struct nvmet_port *port = to_nvmet_port(item);
1582 
1583 	kfree(port);
1584 }
1585 
1586 static struct configfs_item_operations nvmet_referral_item_ops = {
1587 	.release	= nvmet_referral_release,
1588 };
1589 
1590 static const struct config_item_type nvmet_referral_type = {
1591 	.ct_owner	= THIS_MODULE,
1592 	.ct_attrs	= nvmet_referral_attrs,
1593 	.ct_item_ops	= &nvmet_referral_item_ops,
1594 };
1595 
1596 static struct config_group *nvmet_referral_make(
1597 		struct config_group *group, const char *name)
1598 {
1599 	struct nvmet_port *port;
1600 
1601 	port = kzalloc(sizeof(*port), GFP_KERNEL);
1602 	if (!port)
1603 		return ERR_PTR(-ENOMEM);
1604 
1605 	INIT_LIST_HEAD(&port->entry);
1606 	config_group_init_type_name(&port->group, name, &nvmet_referral_type);
1607 
1608 	return &port->group;
1609 }
1610 
1611 static struct configfs_group_operations nvmet_referral_group_ops = {
1612 	.make_group		= nvmet_referral_make,
1613 	.disconnect_notify	= nvmet_referral_notify,
1614 };
1615 
1616 static const struct config_item_type nvmet_referrals_type = {
1617 	.ct_owner	= THIS_MODULE,
1618 	.ct_group_ops	= &nvmet_referral_group_ops,
1619 };
1620 
1621 static struct nvmet_type_name_map nvmet_ana_state[] = {
1622 	{ NVME_ANA_OPTIMIZED,		"optimized" },
1623 	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
1624 	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
1625 	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
1626 	{ NVME_ANA_CHANGE,		"change" },
1627 };
1628 
1629 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
1630 		char *page)
1631 {
1632 	struct nvmet_ana_group *grp = to_ana_group(item);
1633 	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
1634 	int i;
1635 
1636 	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1637 		if (state == nvmet_ana_state[i].type)
1638 			return sprintf(page, "%s\n", nvmet_ana_state[i].name);
1639 	}
1640 
1641 	return sprintf(page, "\n");
1642 }
1643 
1644 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
1645 		const char *page, size_t count)
1646 {
1647 	struct nvmet_ana_group *grp = to_ana_group(item);
1648 	enum nvme_ana_state *ana_state = grp->port->ana_state;
1649 	int i;
1650 
1651 	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1652 		if (sysfs_streq(page, nvmet_ana_state[i].name))
1653 			goto found;
1654 	}
1655 
1656 	pr_err("Invalid value '%s' for ana_state\n", page);
1657 	return -EINVAL;
1658 
1659 found:
1660 	down_write(&nvmet_ana_sem);
1661 	ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
1662 	nvmet_ana_chgcnt++;
1663 	up_write(&nvmet_ana_sem);
1664 	nvmet_port_send_ana_event(grp->port);
1665 	return count;
1666 }
1667 
1668 CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
1669 
1670 static struct configfs_attribute *nvmet_ana_group_attrs[] = {
1671 	&nvmet_ana_group_attr_ana_state,
1672 	NULL,
1673 };
1674 
1675 static void nvmet_ana_group_release(struct config_item *item)
1676 {
1677 	struct nvmet_ana_group *grp = to_ana_group(item);
1678 
1679 	if (grp == &grp->port->ana_default_group)
1680 		return;
1681 
1682 	down_write(&nvmet_ana_sem);
1683 	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
1684 	nvmet_ana_group_enabled[grp->grpid]--;
1685 	up_write(&nvmet_ana_sem);
1686 
1687 	nvmet_port_send_ana_event(grp->port);
1688 	kfree(grp);
1689 }
1690 
1691 static struct configfs_item_operations nvmet_ana_group_item_ops = {
1692 	.release		= nvmet_ana_group_release,
1693 };
1694 
1695 static const struct config_item_type nvmet_ana_group_type = {
1696 	.ct_item_ops		= &nvmet_ana_group_item_ops,
1697 	.ct_attrs		= nvmet_ana_group_attrs,
1698 	.ct_owner		= THIS_MODULE,
1699 };
1700 
1701 static struct config_group *nvmet_ana_groups_make_group(
1702 		struct config_group *group, const char *name)
1703 {
1704 	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
1705 	struct nvmet_ana_group *grp;
1706 	u32 grpid;
1707 	int ret;
1708 
1709 	ret = kstrtou32(name, 0, &grpid);
1710 	if (ret)
1711 		goto out;
1712 
1713 	ret = -EINVAL;
1714 	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
1715 		goto out;
1716 
1717 	ret = -ENOMEM;
1718 	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
1719 	if (!grp)
1720 		goto out;
1721 	grp->port = port;
1722 	grp->grpid = grpid;
1723 
1724 	down_write(&nvmet_ana_sem);
1725 	grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
1726 	nvmet_ana_group_enabled[grpid]++;
1727 	up_write(&nvmet_ana_sem);
1728 
1729 	nvmet_port_send_ana_event(grp->port);
1730 
1731 	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
1732 	return &grp->group;
1733 out:
1734 	return ERR_PTR(ret);
1735 }
1736 
1737 static struct configfs_group_operations nvmet_ana_groups_group_ops = {
1738 	.make_group		= nvmet_ana_groups_make_group,
1739 };
1740 
1741 static const struct config_item_type nvmet_ana_groups_type = {
1742 	.ct_group_ops		= &nvmet_ana_groups_group_ops,
1743 	.ct_owner		= THIS_MODULE,
1744 };
1745 
1746 /*
1747  * Ports definitions.
1748  */
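/*
 * Illustrative usage sketch (example names): a port only starts
 * listening once the first subsystem is linked into it, see
 * nvmet_port_subsys_allow_link() above.
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1
 *   ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *         /sys/kernel/config/nvmet/ports/1/subsystems/testnqn
 */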
1749 static void nvmet_port_release(struct config_item *item)
1750 {
1751 	struct nvmet_port *port = to_nvmet_port(item);
1752 
1753 	/* Let inflight controllers teardown complete */
	flush_workqueue(nvmet_wq);
	list_del(&port->global_entry);

	kfree(port->ana_state);
	kfree(port);
}

static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_param_inline_data_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_attr_param_pi_enable,
#endif
	NULL,
};

static struct configfs_item_operations nvmet_port_item_ops = {
	.release		= nvmet_port_release,
};

static const struct config_item_type nvmet_port_type = {
	.ct_attrs		= nvmet_port_attrs,
	.ct_item_ops		= &nvmet_port_item_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group *nvmet_ports_make(struct config_group *group,
		const char *name)
{
	struct nvmet_port *port;
	u16 portid;
	u32 i;

	if (kstrtou16(name, 0, &portid))
		return ERR_PTR(-EINVAL);

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

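	/*
	 * ANA group IDs are 1-based (entry 0 is unused), so allocate one
	 * extra slot and index the array directly by grpid.
	 */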
	port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
			sizeof(*port->ana_state), GFP_KERNEL);
	if (!port->ana_state) {
		kfree(port);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
		if (i == NVMET_DEFAULT_ANA_GRPID)
			port->ana_state[i] = NVME_ANA_OPTIMIZED;
		else
			port->ana_state[i] = NVME_ANA_INACCESSIBLE;
	}

	list_add(&port->global_entry, &nvmet_ports_list);

	INIT_LIST_HEAD(&port->entry);
	INIT_LIST_HEAD(&port->subsystems);
	INIT_LIST_HEAD(&port->referrals);
	port->inline_data_size = -1;	/* < 0 == let the transport choose */

	port->disc_addr.portid = cpu_to_le16(portid);
	port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
	port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
	config_group_init_type_name(&port->group, name, &nvmet_port_type);

	config_group_init_type_name(&port->subsys_group,
			"subsystems", &nvmet_port_subsys_type);
	configfs_add_default_group(&port->subsys_group, &port->group);

	config_group_init_type_name(&port->referrals_group,
			"referrals", &nvmet_referrals_type);
	configfs_add_default_group(&port->referrals_group, &port->group);

	config_group_init_type_name(&port->ana_groups_group,
			"ana_groups", &nvmet_ana_groups_type);
	configfs_add_default_group(&port->ana_groups_group, &port->group);

	port->ana_default_group.port = port;
	port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
	config_group_init_type_name(&port->ana_default_group.group,
			__stringify(NVMET_DEFAULT_ANA_GRPID),
			&nvmet_ana_group_type);
	configfs_add_default_group(&port->ana_default_group.group,
			&port->ana_groups_group);

	return &port->group;
}
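/*
 * Example (sketch): creating a port instantiates its default sub-groups
 * automatically:
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1
 *   ls /sys/kernel/config/nvmet/ports/1
 *   # -> addr_* and param_* attributes plus subsystems/, referrals/ and
 *   #    ana_groups/ (with default group 1 pre-created)
 *
 * The port number is illustrative.
 */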

static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;

#ifdef CONFIG_NVME_TARGET_AUTH
static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
		char *page)
{
	u8 *dhchap_secret = to_host(item)->dhchap_secret;

	if (!dhchap_secret)
		return sprintf(page, "\n");
	return sprintf(page, "%s\n", dhchap_secret);
}

static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, false);
	/*
	 * Re-authentication is a soft state, so keep the
	 * current authentication valid until the host
	 * requests re-authentication.
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_key);
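/*
 * Example (sketch): the host's DH-HMAC-CHAP secret is written in the
 * "DHHC-1:..." transport-secret representation, e.g.
 *
 *   echo "DHHC-1:00:<base64 secret>:" > \
 *       /sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_key
 *
 * The hostnqn and the (elided) secret are illustrative; the exact format
 * is validated by nvmet_auth_set_key().
 */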

static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
		char *page)
{
	u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;

	if (!dhchap_secret)
		return sprintf(page, "\n");
	return sprintf(page, "%s\n", dhchap_secret);
}

static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, true);
	/*
	 * Re-authentication is a soft state, so keep the
	 * current authentication valid until the host
	 * requests re-authentication.
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);

static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
		char *page)
{
	struct nvmet_host *host = to_host(item);
	const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);

	return sprintf(page, "%s\n", hash_name ? hash_name : "none");
}

static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	u8 hmac_id;

	hmac_id = nvme_auth_hmac_id(page);
	if (hmac_id == NVME_AUTH_HASH_INVALID)
		return -EINVAL;
	if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
		return -EOPNOTSUPP;
	host->dhchap_hash_id = hmac_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
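/*
 * Example (sketch): the hash is selected by its crypto API hmac name,
 * e.g. "hmac(sha256)", "hmac(sha384)" or "hmac(sha512)", provided the
 * corresponding shash implementation is available:
 *
 *   echo "hmac(sha512)" > \
 *       /sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_hash
 */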

static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
		char *page)
{
	struct nvmet_host *host = to_host(item);
	const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);

	return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
}

static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int dhgroup_id;

	dhgroup_id = nvme_auth_dhgroup_id(page);
	if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
		return -EINVAL;
	if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
		const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);

		if (!crypto_has_kpp(kpp, 0, 0))
			return -EINVAL;
	}
	host->dhchap_dhgroup_id = dhgroup_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
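/*
 * Example (sketch): the DH group is selected by name ("null" or one of
 * the RFC 7919 ffdhe groups, e.g. "ffdhe2048"); non-null groups require
 * the matching kpp implementation:
 *
 *   echo "ffdhe2048" > \
 *       /sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_dhgroup
 */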

static struct configfs_attribute *nvmet_host_attrs[] = {
	&nvmet_host_attr_dhchap_key,
	&nvmet_host_attr_dhchap_ctrl_key,
	&nvmet_host_attr_dhchap_hash,
	&nvmet_host_attr_dhchap_dhgroup,
	NULL,
};
#endif /* CONFIG_NVME_TARGET_AUTH */

static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}

static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
#ifdef CONFIG_NVME_TARGET_AUTH
	.ct_attrs		= nvmet_host_attrs,
#endif
	.ct_owner		= THIS_MODULE,
};

static struct config_group *nvmet_hosts_make_group(struct config_group *group,
		const char *name)
{
	struct nvmet_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return ERR_PTR(-ENOMEM);

#ifdef CONFIG_NVME_TARGET_AUTH
	/* Default to SHA256 */
	host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
#endif

	config_group_init_type_name(&host->group, name, &nvmet_host_type);

	return &host->group;
}
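/*
 * Example (sketch): host entries are created by mkdir and named after
 * the host NQN, e.g.
 *
 *   mkdir /sys/kernel/config/nvmet/hosts/nqn.2014-08.org.nvmexpress:uuid:...
 *
 * The NQN is illustrative (and truncated).
 */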

static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group nvmet_hosts_group;

static const struct config_item_type nvmet_root_type = {
	.ct_owner		= THIS_MODULE,
};

static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};

int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}
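/*
 * Example (sketch): once registered, the target tree appears under the
 * mounted configfs root, typically:
 *
 *   /sys/kernel/config/nvmet/
 *   |-- hosts/
 *   |-- ports/
 *   `-- subsystems/
 */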

void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}