xref: /openbmc/linux/drivers/nvdimm/label.c (revision c18614a1)
1 /*
2  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 #include <linux/device.h>
14 #include <linux/ndctl.h>
15 #include <linux/uuid.h>
16 #include <linux/slab.h>
17 #include <linux/io.h>
18 #include <linux/nd.h>
19 #include "nd-core.h"
20 #include "label.h"
21 #include "nd.h"
22 
23 static guid_t nvdimm_btt_guid;
24 static guid_t nvdimm_btt2_guid;
25 static guid_t nvdimm_pfn_guid;
26 static guid_t nvdimm_dax_guid;
27 
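/*
 * Pick the more recent of two index block sequence numbers.  Sequence
 * numbers cycle 1 -> 2 -> 3 -> 1, so "b is newer than a" when
 * nd_inc_seq(a) == b; a value of 0 is invalid and always loses.
 */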
28 static u32 best_seq(u32 a, u32 b)
29 {
30 	a &= NSINDEX_SEQ_MASK;
31 	b &= NSINDEX_SEQ_MASK;
32 
33 	if (a == 0 || a == b)
34 		return b;
35 	else if (b == 0)
36 		return a;
37 	else if (nd_inc_seq(a) == b)
38 		return b;
39 	else
40 		return a;
41 }
42 
43 unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
44 {
45 	return ndd->nslabel_size;
46 }
47 
48 static size_t __sizeof_namespace_index(u32 nslot)
49 {
50 	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
51 			NSINDEX_ALIGN);
52 }
53 
54 static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
55 		size_t index_size)
56 {
57 	return (ndd->nsarea.config_size - index_size * 2) /
58 			sizeof_namespace_label(ndd);
59 }
60 
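/*
 * Derive the number of label slots from the config area size: estimate
 * nslot as if there were no index blocks, size a pair of index blocks
 * for that estimate, then recount how many labels fit in the remaining
 * space.
 */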
61 int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
62 {
63 	u32 tmp_nslot, n;
64 
65 	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
66 	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;
67 
68 	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
69 }
70 
71 size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
72 {
73 	u32 nslot, space, size;
74 
75 	/*
76 	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
77 	 * enough to hold 2 index blocks and 2 labels.  The minimum index
78 	 * block size is 256 bytes. The label size is 128 for namespaces
79 	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
80 	 */
81 	nslot = nvdimm_num_label_slots(ndd);
82 	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
83 	size = __sizeof_namespace_index(nslot) * 2;
84 	if (size <= space && nslot >= 2)
85 		return size / 2;
86 
87 	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
88 			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
89 	return 0;
90 }
91 
92 static int __nd_label_validate(struct nvdimm_drvdata *ndd)
93 {
94 	/*
95 	 * On media, the label format consists of two index blocks followed
96 	 * by an array of labels.  None of these structures are ever
97 	 * updated in place.  A sequence number tracks the current
98 	 * active index and the next one to write, while labels are
99 	 * written to free slots.
100 	 *
101 	 *     +------------+
102 	 *     |            |
103 	 *     |  nsindex0  |
104 	 *     |            |
105 	 *     +------------+
106 	 *     |            |
107 	 *     |  nsindex1  |
108 	 *     |            |
109 	 *     +------------+
110 	 *     |   label0   |
111 	 *     +------------+
112 	 *     |   label1   |
113 	 *     +------------+
114 	 *     |            |
115 	 *      ....nslot...
116 	 *     |            |
117 	 *     +------------+
118 	 *     |   labelN   |
119 	 *     +------------+
120 	 */
121 	struct nd_namespace_index *nsindex[] = {
122 		to_namespace_index(ndd, 0),
123 		to_namespace_index(ndd, 1),
124 	};
125 	const int num_index = ARRAY_SIZE(nsindex);
126 	struct device *dev = ndd->dev;
127 	bool valid[2] = { 0 };
128 	int i, num_valid = 0;
129 	u32 seq;
130 
131 	for (i = 0; i < num_index; i++) {
132 		u32 nslot;
133 		u8 sig[NSINDEX_SIG_LEN];
134 		u64 sum_save, sum, size;
135 		unsigned int version, labelsize;
136 
137 		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
138 		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
139 			dev_dbg(dev, "nsindex%d signature invalid\n", i);
140 			continue;
141 		}
142 
143 		/* label sizes larger than 128 arrived with v1.2 */
144 		version = __le16_to_cpu(nsindex[i]->major) * 100
145 			+ __le16_to_cpu(nsindex[i]->minor);
146 		if (version >= 102)
147 			labelsize = 1 << (7 + nsindex[i]->labelsize);
148 		else
149 			labelsize = 128;
150 
151 		if (labelsize != sizeof_namespace_label(ndd)) {
152 			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
153 					i, nsindex[i]->labelsize);
154 			continue;
155 		}
156 
157 		sum_save = __le64_to_cpu(nsindex[i]->checksum);
158 		nsindex[i]->checksum = __cpu_to_le64(0);
159 		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
160 		nsindex[i]->checksum = __cpu_to_le64(sum_save);
161 		if (sum != sum_save) {
162 			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
163 			continue;
164 		}
165 
166 		seq = __le32_to_cpu(nsindex[i]->seq);
167 		if ((seq & NSINDEX_SEQ_MASK) == 0) {
168 			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
169 			continue;
170 		}
171 
172 		/* sanity check the index against expected values */
173 		if (__le64_to_cpu(nsindex[i]->myoff)
174 				!= i * sizeof_namespace_index(ndd)) {
175 			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
176 					i, (unsigned long long)
177 					__le64_to_cpu(nsindex[i]->myoff));
178 			continue;
179 		}
180 		if (__le64_to_cpu(nsindex[i]->otheroff)
181 				!= (!i) * sizeof_namespace_index(ndd)) {
182 			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
183 					i, (unsigned long long)
184 					__le64_to_cpu(nsindex[i]->otheroff));
185 			continue;
186 		}
187 		if (__le64_to_cpu(nsindex[i]->labeloff)
188 				!= 2 * sizeof_namespace_index(ndd)) {
189 			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
190 					i, (unsigned long long)
191 					__le64_to_cpu(nsindex[i]->labeloff));
192 			continue;
193 		}
194 
195 		size = __le64_to_cpu(nsindex[i]->mysize);
196 		if (size > sizeof_namespace_index(ndd)
197 				|| size < sizeof(struct nd_namespace_index)) {
198 			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
199 			continue;
200 		}
201 
202 		nslot = __le32_to_cpu(nsindex[i]->nslot);
203 		if (nslot * sizeof_namespace_label(ndd)
204 				+ 2 * sizeof_namespace_index(ndd)
205 				> ndd->nsarea.config_size) {
206 			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
207 					i, nslot, ndd->nsarea.config_size);
208 			continue;
209 		}
210 		valid[i] = true;
211 		num_valid++;
212 	}
213 
214 	switch (num_valid) {
215 	case 0:
216 		break;
217 	case 1:
218 		for (i = 0; i < num_index; i++)
219 			if (valid[i])
220 				return i;
221 		/* can't have num_valid > 0 but valid[] = { false, false } */
222 		WARN_ON(1);
223 		break;
224 	default:
225 		/* pick the best index... */
226 		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
227 				__le32_to_cpu(nsindex[1]->seq));
228 		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
229 			return 1;
230 		else
231 			return 0;
232 		break;
233 	}
234 
235 	return -1;
236 }
237 
238 static int nd_label_validate(struct nvdimm_drvdata *ndd)
239 {
240 	/*
241 	 * In order to probe for and validate namespace index blocks we
242 	 * need to know the size of the labels, and we can't trust the
243 	 * size of the labels until we validate the index blocks.
244 	 * Resolve this dependency loop by probing for known label
245 	 * sizes, but default to v1.2 256-byte namespace labels if
246 	 * discovery fails.
247 	 */
248 	int label_size[] = { 128, 256 };
249 	int i, rc;
250 
251 	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
252 		ndd->nslabel_size = label_size[i];
253 		rc = __nd_label_validate(ndd);
254 		if (rc >= 0)
255 			return rc;
256 	}
257 
258 	return -1;
259 }
260 
261 static void nd_label_copy(struct nvdimm_drvdata *ndd,
262 			  struct nd_namespace_index *dst,
263 			  struct nd_namespace_index *src)
264 {
265 	/* just exit if either destination or source is NULL */
266 	if (!dst || !src)
267 		return;
268 
269 	memcpy(dst, src, sizeof_namespace_index(ndd));
270 }
271 
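/*
 * Labels are stored contiguously after the two index blocks; these
 * helpers translate between a slot number and the corresponding label
 * address in the cached copy of the label area.
 */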
272 static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
273 {
274 	void *base = to_namespace_index(ndd, 0);
275 
276 	return base + 2 * sizeof_namespace_index(ndd);
277 }
278 
279 static int to_slot(struct nvdimm_drvdata *ndd,
280 		struct nd_namespace_label *nd_label)
281 {
282 	unsigned long label, base;
283 
284 	label = (unsigned long) nd_label;
285 	base = (unsigned long) nd_label_base(ndd);
286 
287 	return (label - base) / sizeof_namespace_label(ndd);
288 }
289 
290 static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
291 {
292 	unsigned long label, base;
293 
294 	base = (unsigned long) nd_label_base(ndd);
295 	label = base + sizeof_namespace_label(ndd) * slot;
296 
297 	return (struct nd_namespace_label *) label;
298 }
299 
300 #define for_each_clear_bit_le(bit, addr, size) \
301 	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
302 	     (bit) < (size);                                    \
303 	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
304 
305 /**
306  * preamble_index - common variable initialization for nd_label_* routines
307  * @ndd: dimm container for the relevant label set
308  * @idx: namespace_index index
309  * @nsindex_out: on return set to the currently active namespace index
310  * @free: on return set to the free label bitmap in the index
311  * @nslot: on return set to the number of slots in the label space
312  */
313 static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
314 		struct nd_namespace_index **nsindex_out,
315 		unsigned long **free, u32 *nslot)
316 {
317 	struct nd_namespace_index *nsindex;
318 
319 	nsindex = to_namespace_index(ndd, idx);
320 	if (nsindex == NULL)
321 		return false;
322 
323 	*free = (unsigned long *) nsindex->free;
324 	*nslot = __le32_to_cpu(nsindex->nslot);
325 	*nsindex_out = nsindex;
326 
327 	return true;
328 }
329 
330 char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
331 {
332 	if (!label_id || !uuid)
333 		return NULL;
334 	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
335 			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
336 	return label_id->id;
337 }
338 
339 static bool preamble_current(struct nvdimm_drvdata *ndd,
340 		struct nd_namespace_index **nsindex,
341 		unsigned long **free, u32 *nslot)
342 {
343 	return preamble_index(ndd, ndd->ns_current, nsindex,
344 			free, nslot);
345 }
346 
347 static bool preamble_next(struct nvdimm_drvdata *ndd,
348 		struct nd_namespace_index **nsindex,
349 		unsigned long **free, u32 *nslot)
350 {
351 	return preamble_index(ndd, ndd->ns_next, nsindex,
352 			free, nslot);
353 }
354 
355 static bool slot_valid(struct nvdimm_drvdata *ndd,
356 		struct nd_namespace_label *nd_label, u32 slot)
357 {
358 	/* check that the label was written to the slot it claims to occupy */
359 	if (slot != __le32_to_cpu(nd_label->slot))
360 		return false;
361 
362 	/* check that DPA allocations are 4K aligned */
363 	if ((__le64_to_cpu(nd_label->dpa)
364 				| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
365 		return false;
366 
367 	/* check checksum */
368 	if (namespace_label_has(ndd, checksum)) {
369 		u64 sum, sum_save;
370 
371 		sum_save = __le64_to_cpu(nd_label->checksum);
372 		nd_label->checksum = __cpu_to_le64(0);
373 		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
374 		nd_label->checksum = __cpu_to_le64(sum_save);
375 		if (sum != sum_save) {
376 			dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
377 				slot, sum);
378 			return false;
379 		}
380 	}
381 
382 	return true;
383 }
384 
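/*
 * Walk the active labels in the current index and reserve their DPA
 * ranges so that subsequent allocations do not collide with existing
 * namespaces.
 */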
385 int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
386 {
387 	struct nd_namespace_index *nsindex;
388 	unsigned long *free;
389 	u32 nslot, slot;
390 
391 	if (!preamble_current(ndd, &nsindex, &free, &nslot))
392 		return 0; /* no label, nothing to reserve */
393 
394 	for_each_clear_bit_le(slot, free, nslot) {
395 		struct nd_namespace_label *nd_label;
396 		struct nd_region *nd_region = NULL;
397 		u8 label_uuid[NSLABEL_UUID_LEN];
398 		struct nd_label_id label_id;
399 		struct resource *res;
400 		u32 flags;
401 
402 		nd_label = to_label(ndd, slot);
403 
404 		if (!slot_valid(ndd, nd_label, slot))
405 			continue;
406 
407 		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
408 		flags = __le32_to_cpu(nd_label->flags);
409 		nd_label_gen_id(&label_id, label_uuid, flags);
410 		res = nvdimm_allocate_dpa(ndd, &label_id,
411 				__le64_to_cpu(nd_label->dpa),
412 				__le64_to_cpu(nd_label->rawsize));
413 		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
414 		if (!res)
415 			return -EBUSY;
416 	}
417 
418 	return 0;
419 }
420 
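/*
 * Read and cache the label storage area: fetch and validate the index
 * blocks first, then pull in only the active label slots, batching the
 * transfers to respect the DIMM's maximum transfer size.
 */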
421 int nd_label_data_init(struct nvdimm_drvdata *ndd)
422 {
423 	size_t config_size, read_size, max_xfer, offset;
424 	struct nd_namespace_index *nsindex;
425 	unsigned int i;
426 	int rc = 0;
427 	u32 nslot;
428 
429 	if (ndd->data)
430 		return 0;
431 
432 	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
433 		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
434 			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
435 		return -ENXIO;
436 	}
437 
438 	/*
439 	 * We need to determine the maximum index area as this is the section
440 	 * we must read and validate before we can start processing labels.
441 	 *
442 	 * If the area is too small to contain the two index blocks and two labels
443 	 * then we abort.
444 	 *
445 	 * Start at a label size of 128 as this should result in the largest
446 	 * possible namespace index size.
447 	 */
448 	ndd->nslabel_size = 128;
449 	read_size = sizeof_namespace_index(ndd) * 2;
450 	if (!read_size)
451 		return -ENXIO;
452 
453 	/* Allocate config data */
454 	config_size = ndd->nsarea.config_size;
455 	ndd->data = kvzalloc(config_size, GFP_KERNEL);
456 	if (!ndd->data)
457 		return -ENOMEM;
458 
459 	/*
460 	 * We want to guarantee as few reads as possible while conserving
461 	 * memory. To do that we figure out how much unused space will be left
462 	 * in the last read, divide that by the total number of reads it is
463 	 * going to take given our maximum transfer size, and then reduce our
464 	 * maximum transfer size based on that result.
465 	 */
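	/*
	 * Illustrative (hypothetical) numbers: with config_size = 130048 and
	 * max_xfer = 4096 it takes DIV_ROUND_UP(130048, 4096) = 32 reads and
	 * the last read wastes 1024 bytes; trimming max_xfer by 1024 / 32 = 32
	 * gives 4064, and 32 reads of 4064 bytes cover the area exactly.
	 */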
466 	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
467 	if (read_size < max_xfer) {
468 		/* trim waste */
469 		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
470 			    DIV_ROUND_UP(config_size, max_xfer);
471 		/* make certain we read indexes in exactly 1 read */
472 		if (max_xfer < read_size)
473 			max_xfer = read_size;
474 	}
475 
476 	/* Make our initial read size a multiple of max_xfer size */
477 	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
478 			config_size);
479 
480 	/* Read the index data */
481 	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
482 	if (rc)
483 		goto out_err;
484 
485 	/* Validate the index data; if it is invalid, assume all labels are invalid */
486 	ndd->ns_current = nd_label_validate(ndd);
487 	if (ndd->ns_current < 0)
488 		return 0;
489 
490 	/* Record our index values */
491 	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
492 
493 	/* Copy "current" index on top of the "next" index */
494 	nsindex = to_current_namespace_index(ndd);
495 	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);
496 
497 	/* Determine starting offset for label data */
498 	offset = __le64_to_cpu(nsindex->labeloff);
499 	nslot = __le32_to_cpu(nsindex->nslot);
500 
501 	/* walk every label slot, zeroing free ones and reading in active labels */
502 	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
503 		size_t label_read_size;
504 
505 		/* zero out the unused labels */
506 		if (test_bit_le(i, nsindex->free)) {
507 			memset(ndd->data + offset, 0, ndd->nslabel_size);
508 			continue;
509 		}
510 
511 		/* if we already read past here then just continue */
512 		if (offset + ndd->nslabel_size <= read_size)
513 			continue;
514 
515 	/* if this label starts beyond what has been read, advance the read offset */
516 		if (read_size < offset)
517 			read_size = offset;
518 
519 	/* determine how much more needs to be read in this next call */
520 		label_read_size = offset + ndd->nslabel_size - read_size;
521 		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
522 				  max_xfer;
523 
524 		/* truncate last read if needed */
525 		if (read_size + label_read_size > config_size)
526 			label_read_size = config_size - read_size;
527 
528 		/* Read the label data */
529 		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
530 					    read_size, label_read_size);
531 		if (rc)
532 			goto out_err;
533 
534 		/* push read_size to next read offset */
535 		read_size += label_read_size;
536 	}
537 
538 	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
539 out_err:
540 	return rc;
541 }
542 
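/*
 * Count the labels that are both marked in-use by the current index and
 * pass the per-label validity checks.
 */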
543 int nd_label_active_count(struct nvdimm_drvdata *ndd)
544 {
545 	struct nd_namespace_index *nsindex;
546 	unsigned long *free;
547 	u32 nslot, slot;
548 	int count = 0;
549 
550 	if (!preamble_current(ndd, &nsindex, &free, &nslot))
551 		return 0;
552 
553 	for_each_clear_bit_le(slot, free, nslot) {
554 		struct nd_namespace_label *nd_label;
555 
556 		nd_label = to_label(ndd, slot);
557 
558 		if (!slot_valid(ndd, nd_label, slot)) {
559 			u32 label_slot = __le32_to_cpu(nd_label->slot);
560 			u64 size = __le64_to_cpu(nd_label->rawsize);
561 			u64 dpa = __le64_to_cpu(nd_label->dpa);
562 
563 			dev_dbg(ndd->dev,
564 				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
565 					slot, label_slot, dpa, size);
566 			continue;
567 		}
568 		count++;
569 	}
570 	return count;
571 }
572 
573 struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
574 {
575 	struct nd_namespace_index *nsindex;
576 	unsigned long *free;
577 	u32 nslot, slot;
578 
579 	if (!preamble_current(ndd, &nsindex, &free, &nslot))
580 		return NULL;
581 
582 	for_each_clear_bit_le(slot, free, nslot) {
583 		struct nd_namespace_label *nd_label;
584 
585 		nd_label = to_label(ndd, slot);
586 		if (!slot_valid(ndd, nd_label, slot))
587 			continue;
588 
589 		if (n-- == 0)
590 			return to_label(ndd, slot);
591 	}
592 
593 	return NULL;
594 }
595 
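/*
 * Claim a free slot in the 'next' index by clearing its bit in the free
 * map; returns UINT_MAX when no slot is available.
 */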
596 u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
597 {
598 	struct nd_namespace_index *nsindex;
599 	unsigned long *free;
600 	u32 nslot, slot;
601 
602 	if (!preamble_next(ndd, &nsindex, &free, &nslot))
603 		return UINT_MAX;
604 
605 	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
606 
607 	slot = find_next_bit_le(free, nslot, 0);
608 	if (slot == nslot)
609 		return UINT_MAX;
610 
611 	clear_bit_le(slot, free);
612 
613 	return slot;
614 }
615 
616 bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
617 {
618 	struct nd_namespace_index *nsindex;
619 	unsigned long *free;
620 	u32 nslot;
621 
622 	if (!preamble_next(ndd, &nsindex, &free, &nslot))
623 		return false;
624 
625 	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
626 
627 	if (slot < nslot)
628 		return !test_and_set_bit_le(slot, free);
629 	return false;
630 }
631 
632 u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
633 {
634 	struct nd_namespace_index *nsindex;
635 	unsigned long *free;
636 	u32 nslot;
637 
638 	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
639 
640 	if (!preamble_next(ndd, &nsindex, &free, &nslot))
641 		return nvdimm_num_label_slots(ndd);
642 
643 	return bitmap_weight(free, nslot);
644 }
645 
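/*
 * Build an index block in place (signature, labelsize, sequence number,
 * offsets, checksum, and on init the free map) and write it to the DIMM;
 * for a normal update, rotate ns_current / ns_next so the block just
 * written becomes the current index.
 */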
646 static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
647 		unsigned long flags)
648 {
649 	struct nd_namespace_index *nsindex;
650 	unsigned long offset;
651 	u64 checksum;
652 	u32 nslot;
653 	int rc;
654 
655 	nsindex = to_namespace_index(ndd, index);
656 	if (flags & ND_NSINDEX_INIT)
657 		nslot = nvdimm_num_label_slots(ndd);
658 	else
659 		nslot = __le32_to_cpu(nsindex->nslot);
660 
661 	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
662 	memset(&nsindex->flags, 0, 3);
663 	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
664 	nsindex->seq = __cpu_to_le32(seq);
665 	offset = (unsigned long) nsindex
666 		- (unsigned long) to_namespace_index(ndd, 0);
667 	nsindex->myoff = __cpu_to_le64(offset);
668 	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
669 	offset = (unsigned long) to_namespace_index(ndd,
670 			nd_label_next_nsindex(index))
671 		- (unsigned long) to_namespace_index(ndd, 0);
672 	nsindex->otheroff = __cpu_to_le64(offset);
673 	offset = (unsigned long) nd_label_base(ndd)
674 		- (unsigned long) to_namespace_index(ndd, 0);
675 	nsindex->labeloff = __cpu_to_le64(offset);
676 	nsindex->nslot = __cpu_to_le32(nslot);
677 	nsindex->major = __cpu_to_le16(1);
678 	if (sizeof_namespace_label(ndd) < 256)
679 		nsindex->minor = __cpu_to_le16(1);
680 	else
681 		nsindex->minor = __cpu_to_le16(2);
682 	nsindex->checksum = __cpu_to_le64(0);
683 	if (flags & ND_NSINDEX_INIT) {
684 		unsigned long *free = (unsigned long *) nsindex->free;
685 		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
686 		int last_bits, i;
687 
688 		memset(nsindex->free, 0xff, nfree / 8);
689 		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
690 			clear_bit_le(nslot + i, free);
691 	}
692 	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
693 	nsindex->checksum = __cpu_to_le64(checksum);
694 	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
695 			nsindex, sizeof_namespace_index(ndd));
696 	if (rc < 0)
697 		return rc;
698 
699 	if (flags & ND_NSINDEX_INIT)
700 		return 0;
701 
702 	/* copy the index we just wrote to the new 'next' */
703 	WARN_ON(index != ndd->ns_next);
704 	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
705 	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
706 	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
707 	WARN_ON(ndd->ns_current == ndd->ns_next);
708 
709 	return 0;
710 }
711 
712 static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
713 		struct nd_namespace_label *nd_label)
714 {
715 	return (unsigned long) nd_label
716 		- (unsigned long) to_namespace_index(ndd, 0);
717 }
718 
719 enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
720 {
721 	if (guid_equal(guid, &nvdimm_btt_guid))
722 		return NVDIMM_CCLASS_BTT;
723 	else if (guid_equal(guid, &nvdimm_btt2_guid))
724 		return NVDIMM_CCLASS_BTT2;
725 	else if (guid_equal(guid, &nvdimm_pfn_guid))
726 		return NVDIMM_CCLASS_PFN;
727 	else if (guid_equal(guid, &nvdimm_dax_guid))
728 		return NVDIMM_CCLASS_DAX;
729 	else if (guid_equal(guid, &guid_null))
730 		return NVDIMM_CCLASS_NONE;
731 
732 	return NVDIMM_CCLASS_UNKNOWN;
733 }
734 
735 static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
736 	guid_t *target)
737 {
738 	if (claim_class == NVDIMM_CCLASS_BTT)
739 		return &nvdimm_btt_guid;
740 	else if (claim_class == NVDIMM_CCLASS_BTT2)
741 		return &nvdimm_btt2_guid;
742 	else if (claim_class == NVDIMM_CCLASS_PFN)
743 		return &nvdimm_pfn_guid;
744 	else if (claim_class == NVDIMM_CCLASS_DAX)
745 		return &nvdimm_dax_guid;
746 	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
747 		/*
748 		 * If we're modifying a namespace for which we don't
749 		 * know the claim_class, don't touch the existing guid.
750 		 */
751 		return target;
752 	} else
753 		return &guid_null;
754 }
755 
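/*
 * Write an updated pmem namespace label for one DIMM mapping: allocate a
 * slot in the staging (next) index, populate and checksum the label,
 * write it out, retire any previous label with the same uuid, and then
 * publish the new index.
 */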
756 static int __pmem_label_update(struct nd_region *nd_region,
757 		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
758 		int pos)
759 {
760 	struct nd_namespace_common *ndns = &nspm->nsio.common;
761 	struct nd_interleave_set *nd_set = nd_region->nd_set;
762 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
763 	struct nd_label_ent *label_ent, *victim = NULL;
764 	struct nd_namespace_label *nd_label;
765 	struct nd_namespace_index *nsindex;
766 	struct nd_label_id label_id;
767 	struct resource *res;
768 	unsigned long *free;
769 	u32 nslot, slot;
770 	size_t offset;
771 	u64 cookie;
772 	int rc;
773 
774 	if (!preamble_next(ndd, &nsindex, &free, &nslot))
775 		return -ENXIO;
776 
777 	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
778 	nd_label_gen_id(&label_id, nspm->uuid, 0);
779 	for_each_dpa_resource(ndd, res)
780 		if (strcmp(res->name, label_id.id) == 0)
781 			break;
782 
783 	if (!res) {
784 		WARN_ON_ONCE(1);
785 		return -ENXIO;
786 	}
787 
788 	/* allocate and write the label to the staging (next) index */
789 	slot = nd_label_alloc_slot(ndd);
790 	if (slot == UINT_MAX)
791 		return -ENXIO;
792 	dev_dbg(ndd->dev, "allocated: %d\n", slot);
793 
794 	nd_label = to_label(ndd, slot);
795 	memset(nd_label, 0, sizeof_namespace_label(ndd));
796 	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
797 	if (nspm->alt_name)
798 		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
799 	nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
800 	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
801 	nd_label->position = __cpu_to_le16(pos);
802 	nd_label->isetcookie = __cpu_to_le64(cookie);
803 	nd_label->rawsize = __cpu_to_le64(resource_size(res));
804 	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
805 	nd_label->dpa = __cpu_to_le64(res->start);
806 	nd_label->slot = __cpu_to_le32(slot);
807 	if (namespace_label_has(ndd, type_guid))
808 		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
809 	if (namespace_label_has(ndd, abstraction_guid))
810 		guid_copy(&nd_label->abstraction_guid,
811 				to_abstraction_guid(ndns->claim_class,
812 					&nd_label->abstraction_guid));
813 	if (namespace_label_has(ndd, checksum)) {
814 		u64 sum;
815 
816 		nd_label->checksum = __cpu_to_le64(0);
817 		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
818 		nd_label->checksum = __cpu_to_le64(sum);
819 	}
820 	nd_dbg_dpa(nd_region, ndd, res, "\n");
821 
822 	/* update label */
823 	offset = nd_label_offset(ndd, nd_label);
824 	rc = nvdimm_set_config_data(ndd, offset, nd_label,
825 			sizeof_namespace_label(ndd));
826 	if (rc < 0)
827 		return rc;
828 
829 	/* Garbage collect the previous label */
830 	mutex_lock(&nd_mapping->lock);
831 	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
832 		if (!label_ent->label)
833 			continue;
834 		if (memcmp(nspm->uuid, label_ent->label->uuid,
835 					NSLABEL_UUID_LEN) != 0)
836 			continue;
837 		victim = label_ent;
838 		list_move_tail(&victim->list, &nd_mapping->labels);
839 		break;
840 	}
841 	if (victim) {
842 		dev_dbg(ndd->dev, "free: %d\n", slot);
843 		slot = to_slot(ndd, victim->label);
844 		nd_label_free_slot(ndd, slot);
845 		victim->label = NULL;
846 	}
847 
848 	/* update index */
849 	rc = nd_label_write_index(ndd, ndd->ns_next,
850 			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
851 	if (rc == 0) {
852 		list_for_each_entry(label_ent, &nd_mapping->labels, list)
853 			if (!label_ent->label) {
854 				label_ent->label = nd_label;
855 				nd_label = NULL;
856 				break;
857 			}
858 		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
859 				"failed to track label: %d\n",
860 				to_slot(ndd, nd_label));
861 		if (nd_label)
862 			rc = -ENXIO;
863 	}
864 	mutex_unlock(&nd_mapping->lock);
865 
866 	return rc;
867 }
868 
869 static bool is_old_resource(struct resource *res, struct resource **list, int n)
870 {
871 	int i;
872 
873 	if (res->flags & DPA_RESOURCE_ADJUSTED)
874 		return false;
875 	for (i = 0; i < n; i++)
876 		if (res == list[i])
877 			return true;
878 	return false;
879 }
880 
881 static struct resource *to_resource(struct nvdimm_drvdata *ndd,
882 		struct nd_namespace_label *nd_label)
883 {
884 	struct resource *res;
885 
886 	for_each_dpa_resource(ndd, res) {
887 		if (res->start != __le64_to_cpu(nd_label->dpa))
888 			continue;
889 		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
890 			continue;
891 		return res;
892 	}
893 
894 	return NULL;
895 }
896 
897 /*
898  * 1/ Account all the labels that can be freed after this update
899  * 2/ Allocate and write the label to the staging (next) index
900  * 3/ Record the resources in the namespace device
901  */
902 static int __blk_label_update(struct nd_region *nd_region,
903 		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
904 		int num_labels)
905 {
906 	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
907 	struct nd_interleave_set *nd_set = nd_region->nd_set;
908 	struct nd_namespace_common *ndns = &nsblk->common;
909 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
910 	struct nd_namespace_label *nd_label;
911 	struct nd_label_ent *label_ent, *e;
912 	struct nd_namespace_index *nsindex;
913 	unsigned long *free, *victim_map = NULL;
914 	struct resource *res, **old_res_list;
915 	struct nd_label_id label_id;
916 	u8 uuid[NSLABEL_UUID_LEN];
917 	int min_dpa_idx = 0;
918 	LIST_HEAD(list);
919 	u32 nslot, slot;
920 
921 	if (!preamble_next(ndd, &nsindex, &free, &nslot))
922 		return -ENXIO;
923 
924 	old_res_list = nsblk->res;
925 	nfree = nd_label_nfree(ndd);
926 	old_num_resources = nsblk->num_resources;
927 	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
928 
929 	/*
930 	 * We need to loop over the old resources a few times, which seems a
931 	 * bit inefficient, but we need to know that we have the label
932 	 * space before we start mutating the tracking structures.
933 	 * Otherwise the recovery method of last resort for userspace is to
934 	 * disable and re-enable the parent region.
935 	 */
936 	alloc = 0;
937 	for_each_dpa_resource(ndd, res) {
938 		if (strcmp(res->name, label_id.id) != 0)
939 			continue;
940 		if (!is_old_resource(res, old_res_list, old_num_resources))
941 			alloc++;
942 	}
943 
944 	victims = 0;
945 	if (old_num_resources) {
946 		/* convert old local-label-map to dimm-slot victim-map */
947 		victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long),
948 				GFP_KERNEL);
949 		if (!victim_map)
950 			return -ENOMEM;
951 
952 		/* mark unused labels for garbage collection */
953 		for_each_clear_bit_le(slot, free, nslot) {
954 			nd_label = to_label(ndd, slot);
955 			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
956 			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
957 				continue;
958 			res = to_resource(ndd, nd_label);
959 			if (res && is_old_resource(res, old_res_list,
960 						old_num_resources))
961 				continue;
962 			slot = to_slot(ndd, nd_label);
963 			set_bit(slot, victim_map);
964 			victims++;
965 		}
966 	}
967 
968 	/* don't allow updates that consume the last label */
969 	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
970 		dev_info(&nsblk->common.dev, "insufficient label space\n");
971 		kfree(victim_map);
972 		return -ENOSPC;
973 	}
974 	/* from here on we need to abort on error */
975 
976 
977 	/* assign all resources to the namespace before writing the labels */
978 	nsblk->res = NULL;
979 	nsblk->num_resources = 0;
980 	for_each_dpa_resource(ndd, res) {
981 		if (strcmp(res->name, label_id.id) != 0)
982 			continue;
983 		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
984 			rc = -ENOMEM;
985 			goto abort;
986 		}
987 	}
988 
989 	/*
990 	 * Find the resource associated with the first label in the set
991 	 * per the v1.2 namespace specification.
992 	 */
993 	for (i = 0; i < nsblk->num_resources; i++) {
994 		struct resource *min = nsblk->res[min_dpa_idx];
995 
996 		res = nsblk->res[i];
997 		if (res->start < min->start)
998 			min_dpa_idx = i;
999 	}
1000 
1001 	for (i = 0; i < nsblk->num_resources; i++) {
1002 		size_t offset;
1003 
1004 		res = nsblk->res[i];
1005 		if (is_old_resource(res, old_res_list, old_num_resources))
1006 			continue; /* carry-over */
1007 		slot = nd_label_alloc_slot(ndd);
1008 		if (slot == UINT_MAX)
1009 			goto abort;
1010 		dev_dbg(ndd->dev, "allocated: %d\n", slot);
1011 
1012 		nd_label = to_label(ndd, slot);
1013 		memset(nd_label, 0, sizeof_namespace_label(ndd));
1014 		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
1015 		if (nsblk->alt_name)
1016 			memcpy(nd_label->name, nsblk->alt_name,
1017 					NSLABEL_NAME_LEN);
1018 		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
1019 
1020 		/*
1021 		 * Use the presence of the type_guid as a flag to
1022 		 * determine isetcookie usage and nlabel + position
1023 		 * policy for blk-aperture namespaces.
1024 		 */
1025 		if (namespace_label_has(ndd, type_guid)) {
1026 			if (i == min_dpa_idx) {
1027 				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
1028 				nd_label->position = __cpu_to_le16(0);
1029 			} else {
1030 				nd_label->nlabel = __cpu_to_le16(0xffff);
1031 				nd_label->position = __cpu_to_le16(0xffff);
1032 			}
1033 			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
1034 		} else {
1035 			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
1036 			nd_label->position = __cpu_to_le16(0); /* N/A */
1037 			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
1038 		}
1039 
1040 		nd_label->dpa = __cpu_to_le64(res->start);
1041 		nd_label->rawsize = __cpu_to_le64(resource_size(res));
1042 		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
1043 		nd_label->slot = __cpu_to_le32(slot);
1044 		if (namespace_label_has(ndd, type_guid))
1045 			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
1046 		if (namespace_label_has(ndd, abstraction_guid))
1047 			guid_copy(&nd_label->abstraction_guid,
1048 					to_abstraction_guid(ndns->claim_class,
1049 						&nd_label->abstraction_guid));
1050 
1051 		if (namespace_label_has(ndd, checksum)) {
1052 			u64 sum;
1053 
1054 			nd_label->checksum = __cpu_to_le64(0);
1055 			sum = nd_fletcher64(nd_label,
1056 					sizeof_namespace_label(ndd), 1);
1057 			nd_label->checksum = __cpu_to_le64(sum);
1058 		}
1059 
1060 		/* update label */
1061 		offset = nd_label_offset(ndd, nd_label);
1062 		rc = nvdimm_set_config_data(ndd, offset, nd_label,
1063 				sizeof_namespace_label(ndd));
1064 		if (rc < 0)
1065 			goto abort;
1066 	}
1067 
1068 	/* free up now unused slots in the new index */
1069 	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
1070 		dev_dbg(ndd->dev, "free: %d\n", slot);
1071 		nd_label_free_slot(ndd, slot);
1072 	}
1073 
1074 	/* update index */
1075 	rc = nd_label_write_index(ndd, ndd->ns_next,
1076 			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
1077 	if (rc)
1078 		goto abort;
1079 
1080 	/*
1081 	 * Now that the on-dimm labels are up to date, fix up the tracking
1082 	 * entries in nd_mapping->labels
1083 	 */
1084 	nlabel = 0;
1085 	mutex_lock(&nd_mapping->lock);
1086 	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
1087 		nd_label = label_ent->label;
1088 		if (!nd_label)
1089 			continue;
1090 		nlabel++;
1091 		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
1092 		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
1093 			continue;
1094 		nlabel--;
1095 		list_move(&label_ent->list, &list);
1096 		label_ent->label = NULL;
1097 	}
1098 	list_splice_tail_init(&list, &nd_mapping->labels);
1099 	mutex_unlock(&nd_mapping->lock);
1100 
1101 	if (nlabel + nsblk->num_resources > num_labels) {
1102 		/*
1103 		 * Bug, we can't end up with more resources than
1104 		 * available labels
1105 		 */
1106 		WARN_ON_ONCE(1);
1107 		rc = -ENXIO;
1108 		goto out;
1109 	}
1110 
1111 	mutex_lock(&nd_mapping->lock);
1112 	label_ent = list_first_entry_or_null(&nd_mapping->labels,
1113 			typeof(*label_ent), list);
1114 	if (!label_ent) {
1115 		WARN_ON(1);
1116 		mutex_unlock(&nd_mapping->lock);
1117 		rc = -ENXIO;
1118 		goto out;
1119 	}
1120 	for_each_clear_bit_le(slot, free, nslot) {
1121 		nd_label = to_label(ndd, slot);
1122 		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
1123 		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
1124 			continue;
1125 		res = to_resource(ndd, nd_label);
1126 		res->flags &= ~DPA_RESOURCE_ADJUSTED;
1127 		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
1128 		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
1129 			if (label_ent->label)
1130 				continue;
1131 			label_ent->label = nd_label;
1132 			nd_label = NULL;
1133 			break;
1134 		}
1135 		if (nd_label)
1136 			dev_WARN(&nsblk->common.dev,
1137 					"failed to track label slot%d\n", slot);
1138 	}
1139 	mutex_unlock(&nd_mapping->lock);
1140 
1141  out:
1142 	kfree(old_res_list);
1143 	kfree(victim_map);
1144 	return rc;
1145 
1146  abort:
1147 	/*
1148 	 * 1/ repair the allocated label bitmap in the index
1149 	 * 2/ restore the resource list
1150 	 */
1151 	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
1152 	kfree(nsblk->res);
1153 	nsblk->res = old_res_list;
1154 	nsblk->num_resources = old_num_resources;
1155 	old_res_list = NULL;
1156 	goto out;
1157 }
1158 
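/*
 * Make sure the mapping has at least num_labels tracking entries, and if
 * the label area has never been initialized write out an initial pair of
 * index blocks.
 */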
1159 static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
1160 {
1161 	int i, old_num_labels = 0;
1162 	struct nd_label_ent *label_ent;
1163 	struct nd_namespace_index *nsindex;
1164 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1165 
1166 	mutex_lock(&nd_mapping->lock);
1167 	list_for_each_entry(label_ent, &nd_mapping->labels, list)
1168 		old_num_labels++;
1169 	mutex_unlock(&nd_mapping->lock);
1170 
1171 	/*
1172 	 * We need to preserve all the old labels for the mapping so
1173 	 * they can be garbage collected after writing the new labels.
1174 	 */
1175 	for (i = old_num_labels; i < num_labels; i++) {
1176 		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
1177 		if (!label_ent)
1178 			return -ENOMEM;
1179 		mutex_lock(&nd_mapping->lock);
1180 		list_add_tail(&label_ent->list, &nd_mapping->labels);
1181 		mutex_unlock(&nd_mapping->lock);
1182 	}
1183 
1184 	if (ndd->ns_current == -1 || ndd->ns_next == -1)
1185 		/* pass */;
1186 	else
1187 		return max(num_labels, old_num_labels);
1188 
1189 	nsindex = to_namespace_index(ndd, 0);
1190 	memset(nsindex, 0, ndd->nsarea.config_size);
1191 	for (i = 0; i < 2; i++) {
1192 		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
1193 
1194 		if (rc)
1195 			return rc;
1196 	}
1197 	ndd->ns_next = 1;
1198 	ndd->ns_current = 0;
1199 
1200 	return max(num_labels, old_num_labels);
1201 }
1202 
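/*
 * Free every label slot whose uuid matches @uuid and publish an updated
 * index; if no active labels remain the mapping's tracking list is
 * dropped as well.
 */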
1203 static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
1204 {
1205 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1206 	struct nd_label_ent *label_ent, *e;
1207 	struct nd_namespace_index *nsindex;
1208 	u8 label_uuid[NSLABEL_UUID_LEN];
1209 	unsigned long *free;
1210 	LIST_HEAD(list);
1211 	u32 nslot, slot;
1212 	int active = 0;
1213 
1214 	if (!uuid)
1215 		return 0;
1216 
1217 	/* no index || no labels == nothing to delete */
1218 	if (!preamble_next(ndd, &nsindex, &free, &nslot))
1219 		return 0;
1220 
1221 	mutex_lock(&nd_mapping->lock);
1222 	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
1223 		struct nd_namespace_label *nd_label = label_ent->label;
1224 
1225 		if (!nd_label)
1226 			continue;
1227 		active++;
1228 		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
1229 		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
1230 			continue;
1231 		active--;
1232 		slot = to_slot(ndd, nd_label);
1233 		nd_label_free_slot(ndd, slot);
1234 		dev_dbg(ndd->dev, "free: %d\n", slot);
1235 		list_move_tail(&label_ent->list, &list);
1236 		label_ent->label = NULL;
1237 	}
1238 	list_splice_tail_init(&list, &nd_mapping->labels);
1239 
1240 	if (active == 0) {
1241 		nd_mapping_free_labels(nd_mapping);
1242 		dev_dbg(ndd->dev, "no more active labels\n");
1243 	}
1244 	mutex_unlock(&nd_mapping->lock);
1245 
1246 	return nd_label_write_index(ndd, ndd->ns_next,
1247 			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
1248 }
1249 
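/*
 * Update (or, when size is zero, delete) the pmem namespace label on
 * every DIMM mapping in the region.
 */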
1250 int nd_pmem_namespace_label_update(struct nd_region *nd_region,
1251 		struct nd_namespace_pmem *nspm, resource_size_t size)
1252 {
1253 	int i;
1254 
1255 	for (i = 0; i < nd_region->ndr_mappings; i++) {
1256 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1257 		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1258 		struct resource *res;
1259 		int rc, count = 0;
1260 
1261 		if (size == 0) {
1262 			rc = del_labels(nd_mapping, nspm->uuid);
1263 			if (rc)
1264 				return rc;
1265 			continue;
1266 		}
1267 
1268 		for_each_dpa_resource(ndd, res)
1269 			if (strncmp(res->name, "pmem", 4) == 0)
1270 				count++;
1271 		WARN_ON_ONCE(!count);
1272 
1273 		rc = init_labels(nd_mapping, count);
1274 		if (rc < 0)
1275 			return rc;
1276 
1277 		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
1278 		if (rc)
1279 			return rc;
1280 	}
1281 
1282 	return 0;
1283 }
1284 
1285 int nd_blk_namespace_label_update(struct nd_region *nd_region,
1286 		struct nd_namespace_blk *nsblk, resource_size_t size)
1287 {
1288 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1289 	struct resource *res;
1290 	int count = 0;
1291 
1292 	if (size == 0)
1293 		return del_labels(nd_mapping, nsblk->uuid);
1294 
1295 	for_each_dpa_resource(to_ndd(nd_mapping), res)
1296 		count++;
1297 
1298 	count = init_labels(nd_mapping, count);
1299 	if (count < 0)
1300 		return count;
1301 
1302 	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
1303 }
1304 
1305 int __init nd_label_init(void)
1306 {
1307 	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
1308 	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
1309 	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
1310 	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));
1311 
1312 	return 0;
1313 }
1314