// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nspm->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nspm->id);
	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}

static const struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
};

static const struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
};

static const struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
};

static bool is_namespace_pmem(const struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(const struct device *dev)
{
	return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(const struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}

static int is_uuid_busy(struct device *dev, void *data)
{
	u8 *uuid1 = data, *uuid2 = NULL;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid2 = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid2 = nsblk->uuid;
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		uuid2 = nd_btt->uuid;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		uuid2 = nd_pfn->uuid;
	}

	if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
		return -EBUSY;

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_region(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}

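/*
 * A pmem namespace is mapped with struct page (ZONE_DEVICE) only when
 * the region advertises pagemap support, no btt/pfn personality has
 * claimed the namespace, raw access is not forced, and the range does
 * not alias System RAM.  Architectures that cannot map pmem write-back
 * cacheable (ARCH_MEMREMAP_PMEM != MEMREMAP_WB) also fall back to raw
 * access.
 */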
bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_namespace_io *nsio;

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

	if (ndns->force_raw)
		return false;

	nsio = to_nd_namespace_io(dev);
	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
				IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED)
		return false;

	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);

unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (nspm->lbasize == 0 || nspm->lbasize == 512)
			/* default */;
		else if (nspm->lbasize == 4096)
			return 4096;
		else
			dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
					nspm->lbasize);
	}

	/*
	 * There is no namespace label (is_namespace_io()), or the label
	 * indicates the default sector size.
	 */
	return 512;
}
EXPORT_SYMBOL(pmem_sector_size);

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		int nsidx = 0;

		if (is_namespace_pmem(&ndns->dev)) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(&ndns->dev);
			nsidx = nspm->id;
		}

		if (nsidx)
			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
					suffix ? suffix : "");
		else
			sprintf(name, "pmem%d%s", nd_region->id,
					suffix ? suffix : "");
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
				suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const u8 *nd_dev_to_uuid(struct device *dev)
{
	static const u8 null_uuid[16];

	if (!dev)
		return null_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kstrndup(buf, len, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

out:
	kfree(input);
	return rc;
}

static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	if (!nsblk->uuid)
		return 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);
	return size;
}

static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);

static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = nsblk->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

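/*
 * Free @n bytes from the end of a label_id allocation on @nd_mapping.
 * Whole trailing allocations are deleted until @n is satisfied, then
 * the final resource is shrunk with adjust_resource().  BLK
 * allocations shrink from the low end so they stay packed toward high
 * DPA.
 */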
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	if (is_blk)
		first_dpa = nd_mapping->start + nd_mapping->size - n;
	else
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}

/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one exists).  If
 * reserving PMEM any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, struct resource *prev,
		struct resource *next, struct resource *exist,
		resource_size_t n, struct resource *valid)
{
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;

	if (valid->start >= valid->end)
		goto invalid;

	if (is_reserve)
		return;

	if (!is_pmem) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_bus *nvdimm_bus;
		struct blk_alloc_info info = {
			.nd_mapping = nd_mapping,
			.available = nd_mapping->size,
			.res = valid,
		};

		WARN_ON(!is_nd_blk(&nd_region->dev));
		nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
		device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
		return;
	}

	/* allocation needs to be contiguous, so this is all or nothing */
	if (resource_size(valid) < n)
		goto invalid;

	/* we've got all the space we need and no existing allocation */
	if (!exist)
		return;

	/* allocation needs to be contiguous with the existing namespace */
	if (valid->start == exist->end + 1
			|| valid->end == exist->start - 1)
		return;

 invalid:
	/* truncate @valid size to 0 */
	valid->end = valid->start - 1;
}

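/*
 * Where in a free-space scan an allocation can land relative to the
 * current resource: before it, in the gap between it and its sibling,
 * or after the last resource in the mapping.
 */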
enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *exist = NULL, valid;
	const resource_size_t to_allocate = n;
	int first;

	for_each_dpa_resource(ndd, res)
		if (strcmp(label_id->id, res->name) == 0)
			exist = res;

	valid.start = nd_mapping->start;
	valid.end = mapping_end;
	valid.name = "free space";
 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling, *new_res = NULL;
		resource_size_t allocate, available = 0;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			valid.start = nd_mapping->start;
			valid.end = res->start - 1;
			space_valid(nd_region, ndd, label_id, NULL, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			valid.start = res->start + resource_size(res);
			valid.end = min(mapping_end, next->start - 1);
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_MID;
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			valid.start = res->start + resource_size(res);
			valid.end = mapping_end;
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_AFTER;
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				valid.start += available - allocate;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					valid.start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass".  Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}

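/*
 * Coalesce adjacent dpa resources that carry the same BLK label_id
 * into a single resource, minimizing the number of extents that
 * describe the namespace.  PMEM allocations are contiguous by
 * construction and are skipped.
 */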
static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}

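/*
 * Temporarily claim all free PMEM capacity backed by @nvdimm under the
 * "pmem-reserve" label_id so that a BLK allocation pass cannot encroach
 * on PMEM-capable DPA.  The reservation is torn down again by
 * release_free_pmem().
 */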
int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;
	int rc;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
	if (rc)
		release_free_pmem(nvdimm_bus, nd_mapping);
	return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}

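/*
 * Translate the namespace's per-dimm DPA allocation into a system
 * physical address range.  Each dimm in an interleave set contributes
 * an equal slice, so the SPA offset is the dimm-relative DPA offset
 * multiplied by the number of mappings in the set.
 */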
static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;

	if (size && !nspm->uuid) {
		WARN_ON_ONCE(1);
		size = 0;
	}

	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res;

		if (!ndd) {
			size = 0;
			goto out;
		}

		nd_label_gen_id(&label_id, nspm->uuid, 0);

		/* calculate a spa offset from the dpa allocation offset */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}

		WARN_ON_ONCE(1);
		size = 0;
	}

 out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}

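/*
 * Resize a namespace by growing or shrinking the per-dimm allocation
 * behind it.  @val is the total namespace size in bytes and must be a
 * multiple of PAGE_SIZE * ndr_mappings, since capacity is divided
 * evenly across every dimm in the set.  For example (illustrative
 * sysfs path, actual device names vary):
 *
 *	echo $((2 << 30)) > /sys/bus/nd/devices/namespace0.0/size
 *
 * would size the namespace to 2 GiB, allocating 2 GiB / ndr_mappings
 * from each dimm.
 */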
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	int rc, i, id = -1;
	u8 *uuid = NULL;

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		id = nspm->id;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
		id = nsblk->id;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "not associated with dimm(s)\n");
		return -ENXIO;
	}

	div_u64_rem(val, PAGE_SIZE * nd_region->ndr_mappings, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %ldK aligned\n", val,
				(PAGE_SIZE * nd_region->ndr_mappings) / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_allocatable_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_resource(nd_region, nspm,
				val * nd_region->ndr_mappings);
	}

	/*
	 * Try to delete the namespace if we deleted all of its
	 * allocation, this is not the seed or 0th device for the
	 * region, and it is not actively claimed by a btt, pfn, or dax
	 * instance.
	 */
	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
		nd_device_unregister(dev, ND_ASYNC);

	return rc;
}

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	u8 **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
	int i;
	bool locked = false;
	struct device *dev = &ndns->dev;
	struct nd_region *nd_region = to_nd_region(dev->parent);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
			dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
			locked = true;
		}
	}
	return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);

static u8 *namespace_to_uuid(struct device *dev)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 *uuid = namespace_to_uuid(dev);

	if (IS_ERR(uuid))
		return PTR_ERR(uuid);
	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace.  Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (list_empty(&nd_mapping->labels))
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);

		mutex_lock(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			struct nd_label_id label_id;

			if (!nd_label)
				continue;
			nd_label_gen_id(&label_id, nd_label->uuid,
					__le32_to_cpu(nd_label->flags));
			if (strcmp(old_label_id.id, label_id.id) == 0)
				set_bit(ND_LABEL_REAP, &label_ent->flags);
		}
		mutex_unlock(&nd_mapping->lock);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = NULL;
	ssize_t rc = 0;
	u8 **ns_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);

static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nd_size_select_show(nsblk->lbasize,
				blk_lbasize_supported, buf);
	}

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nd_size_select_show(nspm->lbasize,
				pmem_lbasize_supported, buf);
	}
	return -ENXIO;
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const unsigned long *supported;
	unsigned long *lbasize;
	ssize_t rc = 0;

	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		lbasize = &nsblk->lbasize;
		supported = blk_lbasize_supported;
	} else if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		lbasize = &nspm->lbasize;
		supported = pmem_lbasize_supported;
	} else
		return -ENXIO;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_size_select_store(dev, buf, lbasize, supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
			buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	int count = 0, i;
	u8 *uuid = NULL;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

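/*
 * Pick a BTT format version based on the label version in use across
 * the mappings: pre-existing v1.1 labels imply the original BTT1.1
 * layout, v1.2 (or not-yet-initialized) labels imply BTT2.0, and
 * disagreement between mappings is an error.
 */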
static int btt_claim_class(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i, loop_bitmask = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_index *nsindex;

		/*
		 * If any of the DIMMs do not support labels the only
		 * possible BTT format is v1.
		 */
		if (!ndd) {
			loop_bitmask = 0;
			break;
		}

		nsindex = to_namespace_index(ndd, ndd->ns_current);
		if (nsindex == NULL)
			loop_bitmask |= 1;
		else {
			/* check whether existing labels are v1.1 or v1.2 */
			if (__le16_to_cpu(nsindex->major) == 1
					&& __le16_to_cpu(nsindex->minor) == 1)
				loop_bitmask |= 2;
			else
				loop_bitmask |= 4;
		}
	}
	/*
	 * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
	 * block is found, a v1.1 label for any mapping will set bit 1, and a
	 * v1.2 label will set bit 2.
	 *
	 * At the end of the loop, at most one of the three bits must be set.
	 * If multiple bits were set, it means the different mappings disagree
	 * about their labels, and this must be cleaned up first.
	 *
	 * If all the label index blocks are found to agree, nsindex of NULL
	 * implies labels haven't been initialized yet, and when they will,
	 * they will be of the 1.2 format, so we can assume BTT2.0
	 *
	 * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
	 * found, we enforce BTT2.0
	 *
	 * If the loop was never entered, default to BTT1.1 (legacy namespaces)
	 */
	switch (loop_bitmask) {
	case 0:
	case 2:
		return NVDIMM_CCLASS_BTT;
	case 1:
	case 4:
		return NVDIMM_CCLASS_BTT2;
	default:
		return -ENXIO;
	}
}

static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	nd_device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);

static ssize_t __holder_class_store(struct device *dev, const char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (sysfs_streq(buf, "btt"))
		ndns->claim_class = btt_claim_class(dev);
	else if (sysfs_streq(buf, "pfn"))
		ndns->claim_class = NVDIMM_CCLASS_PFN;
	else if (sysfs_streq(buf, "dax"))
		ndns->claim_class = NVDIMM_CCLASS_DAX;
	else if (sysfs_streq(buf, ""))
		ndns->claim_class = NVDIMM_CCLASS_NONE;
	else
		return -EINVAL;

	/* btt_claim_class() could've returned an error */
	if (ndns->claim_class < 0)
		return ndns->claim_class;

	return 0;
}

static ssize_t holder_class_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __holder_class_store(dev, buf);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t holder_class_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	nd_device_lock(dev);
	if (ndns->claim_class == NVDIMM_CCLASS_NONE)
		rc = sprintf(buf, "\n");
	else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
			(ndns->claim_class == NVDIMM_CCLASS_BTT2))
		rc = sprintf(buf, "btt\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
		rc = sprintf(buf, "pfn\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
		rc = sprintf(buf, "dax\n");
	else
		rc = sprintf(buf, "<unknown>\n");
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(holder_class);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct device *claim;
	char *mode;
	ssize_t rc;

	nd_device_lock(dev);
	claim = ndns->claim;
	if (claim && is_nd_btt(claim))
		mode = "safe";
	else if (claim && is_nd_pfn(claim))
		mode = "memory";
	else if (claim && is_nd_dax(claim))
		mode = "dax";
	else if (!claim && pmem_should_map_pages(dev))
		mode = "memory";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	&dev_attr_holder_class.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (a == &dev_attr_resource.attr) {
		if (is_namespace_blk(dev))
			return 0;
		return 0400;
	}

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return 0644;

		return a->mode;
	}

	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_holder_class.attr
			|| a == &dev_attr_force_raw.attr
			|| a == &dev_attr_mode.attr)
		return a->mode;

	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	resource_size_t size;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		nd_device_lock(&ndns->dev);
		nd_device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
				dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	if (nvdimm_namespace_locked(ndns))
		return ERR_PTR(-EACCES);

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa is too small, must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
		if (!nsblk->lbasize) {
			dev_dbg(&ndns->dev, "sector size not set\n");
			return ERR_PTR(-ENODEV);
		}
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

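/*
 * Label-less regions get a single io-namespace that spans the whole
 * region; its size and position are fixed by the region itself.
 */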
static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs) {
		kfree(nsio);
		return NULL;
	}

	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;
	res = &nsio->res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;

	devs[0] = dev;
	return devs;
}

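/*
 * Check that a label with @uuid and interleave-set @cookie exists at
 * interleave position @pos on some dimm in the region, and that no
 * dimm carries duplicate labels for that uuid.
 */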
static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
		u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_interleave_set *nd_set = nd_region->nd_set;
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		bool found_uuid = false;

		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			u16 position, nlabel;
			u64 isetcookie;

			if (!nd_label)
				continue;
			isetcookie = __le64_to_cpu(nd_label->isetcookie);
			position = __le16_to_cpu(nd_label->position);
			nlabel = __le16_to_cpu(nd_label->nlabel);

			if (isetcookie != cookie)
				continue;

			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
				continue;

			if (namespace_label_has(ndd, type_guid)
					&& !guid_equal(&nd_set->type_guid,
						&nd_label->type_guid)) {
				dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
						&nd_set->type_guid,
						&nd_label->type_guid);
				continue;
			}

			if (found_uuid) {
				dev_dbg(ndd->dev, "duplicate entry for uuid\n");
				return false;
			}
			found_uuid = true;
			if (nlabel != nd_region->ndr_mappings)
				continue;
			if (position != pos)
				continue;
			found = nd_label;
			break;
		}
		if (found)
			break;
	}
	return found != NULL;
}

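/*
 * For each mapping, promote the label matching @pmem_id to the front
 * of the label list after validating that its dpa range falls within
 * the range the mapping publishes.
 */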
static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
{
	int i;

	if (!pmem_id)
		return -ENODEV;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_label *nd_label = NULL;
		u64 hw_start, hw_end, pmem_start, pmem_end;
		struct nd_label_ent *label_ent;

		lockdep_assert_held(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			nd_label = label_ent->label;
			if (!nd_label)
				continue;
			if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
				break;
			nd_label = NULL;
		}

		if (!nd_label) {
			WARN_ON(1);
			return -EINVAL;
		}

		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = __le64_to_cpu(nd_label->dpa);
		pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
		if (pmem_start >= hw_start && pmem_start < hw_end
				&& pmem_end <= hw_end && pmem_end > hw_start)
			/* pass */;
		else {
			dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
					dev_name(ndd->dev), nd_label->uuid);
			return -EINVAL;
		}

		/* move recently validated label to the front of the list */
		list_move(&label_ent->list, &nd_mapping->labels);
	}
	return 0;
}

/**
 * create_namespace_pmem - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 * @nsindex: namespace label index block used to validate the labels
 * @nd_label: target pmem namespace label to evaluate
 */
static struct device *create_namespace_pmem(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex,
		struct nd_namespace_label *nd_label)
{
	u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
	struct nd_label_ent *label_ent;
	struct nd_namespace_pmem *nspm;
	struct nd_mapping *nd_mapping;
	resource_size_t size = 0;
	struct resource *res;
	struct device *dev;
	int rc = 0;
	u16 i;

	if (cookie == 0) {
		dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
		return ERR_PTR(-ENXIO);
	}

	if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
		dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
				nd_label->uuid);
		if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
			return ERR_PTR(-EAGAIN);

		dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
				nd_label->uuid);
	}

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return ERR_PTR(-ENOMEM);

	nspm->id = -1;
	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
			continue;
		if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
			continue;
		break;
	}

	if (i < nd_region->ndr_mappings) {
		struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;

		/*
		 * Give up if we don't find an instance of a uuid at each
		 * position (from 0 to nd_region->ndr_mappings - 1), or if we
		 * find a dimm with two instances of the same uuid.
		 */
		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
				nvdimm_name(nvdimm), nd_label->uuid);
		rc = -EINVAL;
		goto err;
	}

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1].  In the process,
	 * check that the namespace aligns with interleave-set.  We know
	 * that it does not overlap with any blk namespaces by virtue of
	 * the dimm being enabled (i.e. nd_label_reserve_dpa()
	 * succeeded).
	 */
	rc = select_pmem_id(nd_region, nd_label->uuid);
	if (rc)
		goto err;

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_namespace_label *label0;
		struct nvdimm_drvdata *ndd;

		nd_mapping = &nd_region->mapping[i];
		label_ent = list_first_entry_or_null(&nd_mapping->labels,
				typeof(*label_ent), list);
		label0 = label_ent ? label_ent->label : NULL;

		if (!label0) {
			WARN_ON(1);
			continue;
		}

		size += __le64_to_cpu(label0->rawsize);
		if (__le16_to_cpu(label0->position) != 0)
			continue;
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup((void __force *) label0->name,
				NSLABEL_NAME_LEN, GFP_KERNEL);
		nspm->uuid = kmemdup((void __force *) label0->uuid,
				NSLABEL_UUID_LEN, GFP_KERNEL);
		nspm->lbasize = __le64_to_cpu(label0->lbasize);
		ndd = to_ndd(nd_mapping);
		if (namespace_label_has(ndd, abstraction_guid))
			nspm->nsio.common.claim_class
				= to_nvdimm_cclass(&label0->abstraction_guid);
	}

	if (!nspm->alt_name || !nspm->uuid) {
		rc = -ENOMEM;
		goto err;
	}

	nd_namespace_pmem_set_resource(nd_region, nspm, size);

	return dev;
 err:
	namespace_pmem_release(dev);
	switch (rc) {
	case -EINVAL:
		dev_dbg(&nd_region->dev, "invalid label(s)\n");
		break;
	case -ENODEV:
		dev_dbg(&nd_region->dev, "label not found\n");
		break;
	default:
		dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
		break;
	}
	return ERR_PTR(rc);
}

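/**
 * nsblk_add_resource - attach a dpa resource to a blk namespace
 * @nd_region: region hosting the namespace
 * @ndd: dimm driver-data with the dpa resource tree to search
 * @nsblk: namespace to extend
 * @start: dpa where the new extent is expected to begin
 *
 * Grow the nsblk->res array by one slot and point it at the dimm
 * resource that carries this namespace's label-id at @start.  Returns
 * NULL if no such resource exists or the array cannot be resized.
 */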
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start)
{
	struct nd_label_id label_id;
	struct resource *res;

	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	res = krealloc(nsblk->res,
			sizeof(void *) * (nsblk->num_resources + 1),
			GFP_KERNEL);
	if (!res)
		return NULL;
	nsblk->res = (struct resource **) res;
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0
				&& res->start == start) {
			nsblk->res[nsblk->num_resources++] = res;
			return res;
		}
	return NULL;
}

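/* Allocate and name the seed (zero-sized) blk namespace for @nd_region */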
static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
{
	struct nd_namespace_blk *nsblk;
	struct device *dev;

	if (!is_nd_blk(&nd_region->dev))
		return NULL;

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
	if (!nsblk)
		return NULL;

	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
	if (nsblk->id < 0) {
		kfree(nsblk);
		return NULL;
	}
	dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
	dev->parent = &nd_region->dev;
	dev->groups = nd_namespace_attribute_groups;

	return &nsblk->common.dev;
}

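/* Allocate and name the seed (zero-sized) pmem namespace for @nd_region */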
static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
{
	struct nd_namespace_pmem *nspm;
	struct resource *res;
	struct device *dev;

	if (!is_memory(&nd_region->dev))
		return NULL;

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return NULL;

	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
	if (nspm->id < 0) {
		kfree(nspm);
		return NULL;
	}
	dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
	dev->groups = nd_namespace_attribute_groups;
	nd_namespace_pmem_set_resource(nd_region, nspm, 0);

	return dev;
}

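/*
 * Replenish the region's namespace seed device so that userspace always
 * has an empty namespace available to configure.
 */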
void nd_region_create_ns_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

	if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
		return;

	if (is_nd_blk(&nd_region->dev))
		nd_region->ns_seed = nd_namespace_blk_create(nd_region);
	else
		nd_region->ns_seed = nd_namespace_pmem_create(nd_region);

	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->ns_seed)
		dev_err(&nd_region->dev, "failed to create %s namespace\n",
				is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
	else
		nd_device_register(nd_region->ns_seed);
}

void nd_region_create_dax_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->dax_seed = nd_dax_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->dax_seed)
		dev_err(&nd_region->dev, "failed to create dax namespace\n");
}

void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->pfn_seed)
		dev_err(&nd_region->dev, "failed to create pfn namespace\n");
}

void nd_region_create_btt_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->btt_seed = nd_btt_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->btt_seed)
		dev_err(&nd_region->dev, "failed to create btt namespace\n");
}

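/*
 * Attach the extent described by @nd_label to an already-discovered
 * namespace with a matching uuid.  Returns the index of the matching
 * entry in @devs (== @count when nothing matches), or -ENXIO on an
 * allocation failure or a uuid conflict between namespace types.
 */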
static int add_namespace_resource(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, struct device **devs,
		int count)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int i;

	for (i = 0; i < count; i++) {
		u8 *uuid = namespace_to_uuid(devs[i]);
		struct resource *res;

		if (IS_ERR_OR_NULL(uuid)) {
			WARN_ON(1);
			continue;
		}

		if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		if (is_namespace_blk(devs[i])) {
			res = nsblk_add_resource(nd_region, ndd,
					to_nd_namespace_blk(devs[i]),
					__le64_to_cpu(nd_label->dpa));
			if (!res)
				return -ENXIO;
			nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
		} else {
			dev_err(&nd_region->dev,
					"error: conflicting extents for uuid: %pUb\n",
					nd_label->uuid);
			return -ENXIO;
		}
		break;
	}

	return i;
}

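/*
 * Validate @nd_label against the interleave set's type_guid and local
 * cookie, then instantiate a blk namespace device around it.
 */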
static struct device *create_namespace_blk(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, int count)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_blk *nsblk;
	char name[NSLABEL_NAME_LEN];
	struct device *dev = NULL;
	struct resource *res;

	if (namespace_label_has(ndd, type_guid)) {
		if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
			dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
					&nd_set->type_guid,
					&nd_label->type_guid);
			return ERR_PTR(-EAGAIN);
		}

		if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
			dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
					nd_set->cookie2,
					__le64_to_cpu(nd_label->isetcookie));
			return ERR_PTR(-EAGAIN);
		}
	}

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
	if (!nsblk)
		return ERR_PTR(-ENOMEM);
	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	dev->parent = &nd_region->dev;
	nsblk->id = -1;
	nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
	nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
			GFP_KERNEL);
	if (namespace_label_has(ndd, abstraction_guid))
		nsblk->common.claim_class
			= to_nvdimm_cclass(&nd_label->abstraction_guid);
	if (!nsblk->uuid)
		goto blk_err;
	memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
	if (name[0]) {
		nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
				GFP_KERNEL);
		if (!nsblk->alt_name)
			goto blk_err;
	}
	res = nsblk_add_resource(nd_region, ndd, nsblk,
			__le64_to_cpu(nd_label->dpa));
	if (!res)
		goto blk_err;
	nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
	return dev;
 blk_err:
	namespace_blk_release(dev);
	return ERR_PTR(-ENXIO);
}

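/* Order namespace devices by the starting address of their first extent */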
static int cmp_dpa(const void *a, const void *b)
{
	const struct device *dev_a = *(const struct device **) a;
	const struct device *dev_b = *(const struct device **) b;
	struct nd_namespace_blk *nsblk_a, *nsblk_b;
	struct nd_namespace_pmem *nspm_a, *nspm_b;

	if (is_namespace_io(dev_a))
		return 0;

	if (is_namespace_blk(dev_a)) {
		nsblk_a = to_nd_namespace_blk(dev_a);
		nsblk_b = to_nd_namespace_blk(dev_b);

		return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
				sizeof(resource_size_t));
	}

	nspm_a = to_nd_namespace_pmem(dev_a);
	nspm_b = to_nd_namespace_pmem(dev_b);

	return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
			sizeof(resource_size_t));
}

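/*
 * scan_labels - discover namespaces from the labels on mapping[0]
 *
 * Walk the first mapping's label list, instantiating a namespace device
 * for each new uuid, and publish a zero-sized seed namespace if no
 * labels are found.  Returns a NULL-terminated device array sorted by
 * starting address.
 */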
static struct device **scan_labels(struct nd_region *nd_region)
{
	int i, count = 0;
	struct device *dev, **devs = NULL;
	struct nd_label_ent *label_ent, *e;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;

	/* "safe" because create_namespace_pmem() might list_move() label_ent */
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;
		struct device **__devs;
		u32 flags;

		if (!nd_label)
			continue;
		flags = __le32_to_cpu(nd_label->flags);
		if (is_nd_blk(&nd_region->dev)
				== !!(flags & NSLABEL_FLAG_LOCAL))
			/* pass, region matches label type */;
		else
			continue;

		/* skip labels that describe extents outside of the region */
		if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
		    __le64_to_cpu(nd_label->dpa) > map_end)
			continue;

		i = add_namespace_resource(nd_region, nd_label, devs, count);
		if (i < 0)
			goto err;
		if (i < count)
			continue;
		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		if (is_nd_blk(&nd_region->dev))
			dev = create_namespace_blk(nd_region, nd_label, count);
		else {
			struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			dev = create_namespace_pmem(nd_region, nsindex, nd_label);
		}

		if (IS_ERR(dev)) {
			switch (PTR_ERR(dev)) {
			case -EAGAIN:
				/* skip invalid labels */
				continue;
			case -ENODEV:
				/* fallthrough to seed creation */
				break;
			default:
				goto err;
			}
		} else
			devs[count++] = dev;

	}

	dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
			count, is_nd_blk(&nd_region->dev)
			? "blk" : "pmem", count == 1 ? "" : "s");

	if (count == 0) {
		/* Publish a zero-sized namespace for userspace to configure. */
		nd_mapping_free_labels(nd_mapping);

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;
		if (is_nd_blk(&nd_region->dev)) {
			struct nd_namespace_blk *nsblk;

			nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
			if (!nsblk)
				goto err;
			dev = &nsblk->common.dev;
			dev->type = &namespace_blk_device_type;
		} else {
			struct nd_namespace_pmem *nspm;

			nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
			if (!nspm)
				goto err;
			dev = &nspm->nsio.common.dev;
			dev->type = &namespace_pmem_device_type;
			nd_namespace_pmem_set_resource(nd_region, nspm, 0);
		}
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	} else if (is_memory(&nd_region->dev)) {
		/* clean unselected labels */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct list_head *l, *e;
			LIST_HEAD(list);
			int j;

			nd_mapping = &nd_region->mapping[i];
			if (list_empty(&nd_mapping->labels)) {
				WARN_ON(1);
				continue;
			}

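			/*
			 * select_pmem_id() moved each namespace's validated
			 * label to the head of the list; keep those first
			 * @count labels and free the remainder.
			 */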
			j = count;
			list_for_each_safe(l, e, &nd_mapping->labels) {
				if (!j--)
					break;
				list_move_tail(l, &list);
			}
			nd_mapping_free_labels(nd_mapping);
			list_splice_init(&list, &nd_mapping->labels);
		}
	}

	if (count > 1)
		sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);

	return devs;

 err:
	if (devs) {
		for (i = 0; devs[i]; i++)
			if (is_nd_blk(&nd_region->dev))
				namespace_blk_release(devs[i]);
			else
				namespace_pmem_release(devs[i]);
		kfree(devs);
	}
	return NULL;
}

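/*
 * Scan labels with every mapping's lock held; the locks are taken with
 * distinct lockdep subclasses, in mapping order, and dropped in reverse.
 */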
static struct device **create_namespaces(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping;
	struct device **devs;
	int i;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	/* lock down all mappings while we scan labels */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		mutex_lock_nested(&nd_mapping->lock, i);
	}

	devs = scan_labels(nd_region);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		int reverse = nd_region->ndr_mappings - 1 - i;

		nd_mapping = &nd_region->mapping[reverse];
		mutex_unlock(&nd_mapping->lock);
	}

	return devs;
}

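/*
 * Undo init_active_labels(): free each mapping's label list, and drop
 * the ndd reference and dimm busy count taken at activation time.
 */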
static void deactivate_labels(void *region)
{
	struct nd_region *nd_region = region;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = nd_mapping->ndd;
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		mutex_lock(&nd_mapping->lock);
		nd_mapping_free_labels(nd_mapping);
		mutex_unlock(&nd_mapping->lock);

		put_ndd(ndd);
		nd_mapping->ndd = NULL;
		if (ndd)
			atomic_dec(&nvdimm->busy);
	}
}

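/*
 * Pin each dimm's driver-data and snapshot its active labels into the
 * mapping's label list, failing the region probe when a dimm is locked
 * or disabled while labels are required to disambiguate dpa.
 */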
static int init_active_labels(struct nd_region *nd_region)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nd_label_ent *label_ent;
		int count, j;

		/*
		 * If the dimm is disabled then we may need to prevent
		 * the region from being activated.
		 */
		if (!ndd) {
			if (test_bit(NDD_LOCKED, &nvdimm->flags))
				/* fail, label data may be unreadable */;
			else if (test_bit(NDD_ALIASING, &nvdimm->flags))
				/* fail, labels needed to disambiguate dpa */;
			else
				return 0;

			dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev),
					test_bit(NDD_LOCKED, &nvdimm->flags)
					? "locked" : "disabled");
			return -ENXIO;
		}
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "count: %d\n", count);
		if (!count)
			continue;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
			if (!label_ent)
				break;
			label = nd_label_active(ndd, j);
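			/*
			 * Dimms flagged NDD_NOBLK cannot host blk
			 * namespaces; clear the LOCAL flag so the label
			 * scan treats the label as pmem.
			 */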
			if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
				u32 flags = __le32_to_cpu(label->flags);

				flags &= ~NSLABEL_FLAG_LOCAL;
				label->flags = __cpu_to_le32(flags);
			}
			label_ent->label = label;

			mutex_lock(&nd_mapping->lock);
			list_add_tail(&label_ent->list, &nd_mapping->labels);
			mutex_unlock(&nd_mapping->lock);
		}

		if (j < count)
			break;
	}

	if (i < nd_region->ndr_mappings) {
		deactivate_labels(nd_region);
		return -ENOMEM;
	}

	return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
			nd_region);
}

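/**
 * nd_region_register_namespaces - scan labels and register namespace devices
 * @nd_region: region to scan
 * @err: output count of namespaces that failed to register
 *
 * Returns the number of namespaces successfully registered, or a
 * negative error code if label initialization fails or nothing could
 * be registered.
 */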
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
	case ND_DEVICE_NAMESPACE_BLK:
		devs = create_namespaces(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_BLK) {
			struct nd_namespace_blk *nsblk;

			nsblk = to_nd_namespace_blk(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nsblk->id = id;
		} else if (type == ND_DEVICE_NAMESPACE_PMEM) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nspm->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		dev->groups = nd_namespace_attribute_groups;
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (i == 0)
			rc = -ENODEV;
	}
	kfree(devs);

	if (rc == -ENODEV)
		return rc;

	return i;
}