xref: /openbmc/linux/drivers/nvdimm/label.c (revision 25763b3c)
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";

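/*
 * Index block sequence numbers are 2 bits wide: valid values are 1-3,
 * 0 is invalid, and nd_inc_seq() (defined in nd.h) cycles them
 * 1 -> 2 -> 3 -> 1.  best_seq() returns the newer of two sequence
 * numbers under that cyclic ordering, e.g. given a == 3 and b == 1,
 * nd_inc_seq(3) == 1 makes b the more recent write.
 */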
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

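/*
 * Index/label sizing: an index block is a fixed header plus a
 * free-list bitmap of one bit per label slot, rounded up to
 * NSINDEX_ALIGN.  Since the usable slot count and the index size
 * depend on each other, nvdimm_num_label_slots() below breaks the
 * cycle by first over-estimating nslot as if no index space were
 * reserved, then recomputing with that worst-case index size.
 */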
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}

int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels.  The minimum index
	 * block size is 256 bytes. The label size is 128 for namespaces
	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * The on-media label format consists of two index blocks
	 * followed by an array of labels.  None of these structures
	 * are ever updated in place.  A sequence number tracks the
	 * current active index and the next one to write, while labels
	 * are written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}

static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

static void nd_label_copy(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index *dst,
			  struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

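/*
 * The label slot array starts immediately after the two index blocks.
 * to_slot() and to_label() convert between a slot number and the
 * address of the corresponding label in the ndd->data shadow copy of
 * the label storage area.
 */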
static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

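/*
 * In the on-media free list a set bit means the slot is free, so
 * walking the clear bits (as this helper does) visits every
 * allocated, potentially active, label slot.
 */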
#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

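/*
 * Generate the "<type>-<uuid>" identifier (e.g. "pmem-<uuid>", or
 * "blk-<uuid>" for local blk-aperture namespaces) used as the name of
 * the DPA resources backing the namespace.
 */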
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that the label was written to the slot it claims */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check that DPA allocations are page aligned */
	if ((__le64_to_cpu(nd_label->dpa)
				| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
				slot, sum);
			return false;
		}
	}

	return true;
}

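/*
 * At DIMM probe time, reserve the DPA range described by every
 * allocated label slot so that later namespace provisioning cannot
 * hand out the same capacity twice.
 */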
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		if (test_bit(NDD_NOBLK, &nvdimm->flags))
			flags &= ~NSLABEL_FLAG_LOCAL;
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the
	 * section we must read and validate before we can start
	 * processing labels.
	 *
	 * If the area is too small to contain the two index blocks and
	 * two labels then we abort.
	 *
	 * Start at a label size of 128 as this should result in the
	 * largest possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory. To do that we figure out how much unused space will be left
	 * in the last read, divide that by the total number of reads it is
	 * going to take given our maximum transfer size, and then reduce our
	 * maximum transfer size based on that result.
	 */
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			    DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}
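
	/*
	 * For example (hypothetical sizes): with config_size = 130000
	 * and max_xfer = 4096 it takes 32 reads and the last read would
	 * waste 1072 bytes; trimming max_xfer to 4063 still takes 32
	 * reads but leaves only 16 wasted bytes in the final transfer.
	 */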

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call. */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				  max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}

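/*
 * Count the slots in the current index whose labels pass slot_valid(),
 * i.e. the number of active label entries on this DIMM.
 */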
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
					slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

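/*
 * Slot allocation operates on the in-memory copy of the "next"
 * (staging) index: allocating a slot clears its free bit, freeing sets
 * it again, and the updated bitmap only reaches the media once
 * nd_label_write_index() publishes it.
 */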
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}

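/*
 * Write out an index block with the given sequence number.  With
 * ND_NSINDEX_INIT the free list is initialized to all-free (minus the
 * pad bits past nslot); otherwise the staged free list is published
 * and the current/next roles of the two index blocks are swapped.
 */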
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
	guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

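/*
 * Release the staging-index slot of a label that an in-flight update
 * supersedes, and drop the tracking entry's reference to it.
 */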
static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}

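/*
 * Write an updated pmem namespace label for one DIMM in the interleave
 * set: allocate a staging slot, fill and checksum the label, write it
 * to media, garbage collect the labels it replaces, then publish the
 * new index.
 */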
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(flags);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
				|| memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) == 0)
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}

static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}

static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
			continue;
		return res;
	}

	return NULL;
}

/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	int min_dpa_idx = 0;
	LIST_HEAD(list);
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is to
	 * disable and re-enable the parent region.
	 */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = bitmap_zalloc(nslot, GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = to_label(ndd, slot);
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
						old_num_resources))
				continue;
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		bitmap_free(victim_map);
		return -ENOSPC;
	}
	/* from here on we need to abort on error */

	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		res = nsblk->res[i];
		if (res->start < min->start)
			min_dpa_idx = i;
	}

	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX)
			goto abort;
		dev_dbg(ndd->dev, "allocated: %d\n", slot);

		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		if (nsblk->alt_name)
			memcpy(nd_label->name, nsblk->alt_name,
					NSLABEL_NAME_LEN);
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);

		/*
		 * Use the presence of the type_guid as a flag to
		 * determine isetcookie usage and nlabel + position
		 * policy for blk-aperture namespaces.
		 */
		if (namespace_label_has(ndd, type_guid)) {
			if (i == min_dpa_idx) {
				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
				nd_label->position = __cpu_to_le16(0);
			} else {
				nd_label->nlabel = __cpu_to_le16(0xffff);
				nd_label->position = __cpu_to_le16(0xffff);
			}
			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
		} else {
			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
			nd_label->position = __cpu_to_le16(0); /* N/A */
			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		}

		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);
		if (namespace_label_has(ndd, type_guid))
			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
		if (namespace_label_has(ndd, abstraction_guid))
			guid_copy(&nd_label->abstraction_guid,
					to_abstraction_guid(ndns->claim_class,
						&nd_label->abstraction_guid));

		if (namespace_label_has(ndd, checksum)) {
			u64 sum;

			nd_label->checksum = __cpu_to_le64(0);
			sum = nd_fletcher64(nd_label,
					sizeof_namespace_label(ndd), 1);
			nd_label->checksum = __cpu_to_le64(sum);
		}

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));
		if (rc < 0)
			goto abort;
	}

	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "free: %d\n", slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	nlabel = 0;
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;
		if (!nd_label)
			continue;
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);
	if (!label_ent) {
		WARN_ON(1);
		mutex_unlock(&nd_mapping->lock);
		rc = -ENXIO;
		goto out;
	}
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

 out:
	kfree(old_res_list);
	bitmap_free(victim_map);
	return rc;

 abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}

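/*
 * Ensure nd_mapping->labels can track at least @num_labels entries,
 * and if the DIMM has no valid index blocks yet write a freshly
 * initialized pair (index0 at sequence 3, index1 at sequence 2) so
 * that index0 starts out as "current".
 */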
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

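/*
 * Update (or delete, when @size is zero) the pmem namespace labels on
 * every DIMM mapping in the region.  Labels are first written with
 * NSLABEL_FLAG_UPDATING set and then rewritten with the flag clear, so
 * an interrupted update remains detectable per UEFI 2.7.
 */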
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}

int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	return 0;
}