// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static uuid_t nvdimm_btt_uuid;
static uuid_t nvdimm_btt2_uuid;
static uuid_t nvdimm_pfn_uuid;
static uuid_t nvdimm_dax_uuid;

static uuid_t cxl_region_uuid;
static uuid_t cxl_namespace_uuid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";

static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}

int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels.  The minimum index
	 * block size is 256 bytes.  The label size is 128 for namespaces
	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * The on-media label format consists of two index blocks followed
	 * by an array of labels.  None of these structures are ever
	 * updated in place.  A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}

static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

static void nd_label_copy(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index *dst,
			  struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
		      u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	u64 sum, sum_save;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return true;

	sum_save = nsl_get_checksum(ndd, nd_label);
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum_save);
	return sum == sum_save;
}

static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	u64 sum;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return;
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum);
}

static bool slot_valid(struct nvdimm_drvdata *ndd,
		       struct nd_namespace_label *nd_label, u32 slot)
{
	bool valid;

	/* check that we are written where we expect to be written */
	if (slot != nsl_get_slot(ndd, nd_label))
		return false;
	valid = nsl_validate_checksum(ndd, nd_label);
	if (!valid)
		dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
	return valid;
}

int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		struct nd_label_id label_id;
		struct resource *res;
		uuid_t label_uuid;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		nsl_get_uuid(ndd, nd_label, &label_uuid);
		flags = nsl_get_flags(ndd, nd_label);
		if (test_bit(NDD_NOBLK, &nvdimm->flags))
			flags &= ~NSLABEL_FLAG_LOCAL;
		nd_label_gen_id(&label_id, &label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				nsl_get_dpa(ndd, nd_label),
				nsl_get_rawsize(ndd, nd_label));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
				ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two indexes and 2 labels
	 * then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory.  To do that we figure out how much unused space will be left
	 * in the last read, divide that by the total number of reads it is
	 * going to take given our maximum transfer size, and then reduce our
	 * maximum transfer size based on that result.
	 */
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			    DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call */
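		/*
		 * Round the read up to a multiple of max_xfer; any further
		 * labels that land inside this transfer are then skipped by
		 * the "already read past here" check above.
		 */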
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				  max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}

int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = nsl_get_slot(ndd, nd_label);
			u64 size = nsl_get_rawsize(ndd, nd_label);
			u64 dpa = nsl_get_dpa(ndd, nd_label);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
					slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}

static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
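	/*
	 * The stored labelsize is sizeof_namespace_label() / 256: 0 for
	 * legacy 128-byte labels, 1 for 256-byte v1.2 labels, matching the
	 * "1 << (7 + labelsize)" decode in __nd_label_validate().
	 */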
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

static enum nvdimm_claim_class guid_to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

/* CXL labels store UUIDs instead of GUIDs for the same data */
static enum nvdimm_claim_class uuid_to_nvdimm_cclass(uuid_t *uuid)
{
	if (uuid_equal(uuid, &nvdimm_btt_uuid))
		return NVDIMM_CCLASS_BTT;
	else if (uuid_equal(uuid, &nvdimm_btt2_uuid))
		return NVDIMM_CCLASS_BTT2;
	else if (uuid_equal(uuid, &nvdimm_pfn_uuid))
		return NVDIMM_CCLASS_PFN;
	else if (uuid_equal(uuid, &nvdimm_dax_uuid))
		return NVDIMM_CCLASS_DAX;
	else if (uuid_equal(uuid, &uuid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
	guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

/* CXL labels store UUIDs instead of GUIDs for the same data */
static const uuid_t *to_abstraction_uuid(enum nvdimm_claim_class claim_class,
					 uuid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_uuid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_uuid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_uuid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_uuid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing uuid.
		 */
		return target;
	} else
		return &uuid_null;
}

static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}

static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
			      struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (efi_namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->efi.type_guid, guid);
}

bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
			    struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (ndd->cxl || !efi_namespace_label_has(ndd, type_guid))
		return true;
	if (!guid_equal(&nd_label->efi.type_guid, guid)) {
		dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
			&nd_label->efi.type_guid);
		return false;
	}
	return true;
}

static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label,
				enum nvdimm_claim_class claim_class)
{
	if (ndd->cxl) {
		uuid_t uuid;

		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
		export_uuid(nd_label->cxl.abstraction_uuid,
			    to_abstraction_uuid(claim_class, &uuid));
		return;
	}

	if (!efi_namespace_label_has(ndd, abstraction_guid))
		return;
	guid_copy(&nd_label->efi.abstraction_guid,
		  to_abstraction_guid(claim_class,
				      &nd_label->efi.abstraction_guid));
}

enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
					    struct nd_namespace_label *nd_label)
{
	if (ndd->cxl) {
		uuid_t uuid;

		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
		return uuid_to_nvdimm_cclass(&uuid);
	}
	if (!efi_namespace_label_has(ndd, abstraction_guid))
		return NVDIMM_CCLASS_NONE;
	return guid_to_nvdimm_cclass(&nd_label->efi.abstraction_guid);
}

static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	nsl_set_uuid(ndd, nd_label, nspm->uuid);
	nsl_set_name(ndd, nd_label, nspm->alt_name);
	nsl_set_flags(ndd, nd_label, flags);
	nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
	nsl_set_nrange(ndd, nd_label, 1);
	nsl_set_position(ndd, nd_label, pos);
	nsl_set_isetcookie(ndd, nd_label, cookie);
	nsl_set_rawsize(ndd, nd_label, resource_size(res));
	nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
	nsl_set_dpa(ndd, nd_label, res->start);
	nsl_set_slot(ndd, nd_label, slot);
	nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
	nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
	nsl_calculate_checksum(ndd, nd_label);
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) ||
		    nsl_uuid_equal(ndd, label_ent->label, nspm->uuid))
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}

static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}

static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != nsl_get_dpa(ndd, nd_label))
			continue;
		if (resource_size(res) != nsl_get_rawsize(ndd, nd_label))
			continue;
		return res;
	}

	return NULL;
}

/*
 * Use the presence of the type_guid as a flag to determine isetcookie
 * usage and nlabel + position policy for blk-aperture namespaces.
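 * When type_guid is absent the helpers below write isetcookie, nlabel,
 * and position as 0, i.e. those fields are treated as not applicable
 * for local (blk) labels in the older format.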
1002 */ 1003 static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd, 1004 struct nd_namespace_label *nd_label, 1005 u64 isetcookie) 1006 { 1007 if (efi_namespace_label_has(ndd, type_guid)) { 1008 nsl_set_isetcookie(ndd, nd_label, isetcookie); 1009 return; 1010 } 1011 nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */ 1012 } 1013 1014 bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, 1015 struct nd_namespace_label *nd_label, 1016 u64 isetcookie) 1017 { 1018 if (!efi_namespace_label_has(ndd, type_guid)) 1019 return true; 1020 1021 if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) { 1022 dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", isetcookie, 1023 nsl_get_isetcookie(ndd, nd_label)); 1024 return false; 1025 } 1026 1027 return true; 1028 } 1029 1030 static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd, 1031 struct nd_namespace_label *nd_label, int nlabel, 1032 bool first) 1033 { 1034 if (!efi_namespace_label_has(ndd, type_guid)) { 1035 nsl_set_nlabel(ndd, nd_label, 0); /* N/A */ 1036 return; 1037 } 1038 nsl_set_nlabel(ndd, nd_label, first ? nlabel : 0xffff); 1039 } 1040 1041 static void nsl_set_blk_position(struct nvdimm_drvdata *ndd, 1042 struct nd_namespace_label *nd_label, 1043 bool first) 1044 { 1045 if (!efi_namespace_label_has(ndd, type_guid)) { 1046 nsl_set_position(ndd, nd_label, 0); 1047 return; 1048 } 1049 nsl_set_position(ndd, nd_label, first ? 0 : 0xffff); 1050 } 1051 1052 /* 1053 * 1/ Account all the labels that can be freed after this update 1054 * 2/ Allocate and write the label to the staging (next) index 1055 * 3/ Record the resources in the namespace device 1056 */ 1057 static int __blk_label_update(struct nd_region *nd_region, 1058 struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk, 1059 int num_labels) 1060 { 1061 int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO; 1062 struct nd_interleave_set *nd_set = nd_region->nd_set; 1063 struct nd_namespace_common *ndns = &nsblk->common; 1064 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1065 struct nd_namespace_label *nd_label; 1066 struct nd_label_ent *label_ent, *e; 1067 struct nd_namespace_index *nsindex; 1068 unsigned long *free, *victim_map = NULL; 1069 struct resource *res, **old_res_list; 1070 struct nd_label_id label_id; 1071 int min_dpa_idx = 0; 1072 LIST_HEAD(list); 1073 u32 nslot, slot; 1074 1075 if (!preamble_next(ndd, &nsindex, &free, &nslot)) 1076 return -ENXIO; 1077 1078 old_res_list = nsblk->res; 1079 nfree = nd_label_nfree(ndd); 1080 old_num_resources = nsblk->num_resources; 1081 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); 1082 1083 /* 1084 * We need to loop over the old resources a few times, which seems a 1085 * bit inefficient, but we need to know that we have the label 1086 * space before we start mutating the tracking structures. 1087 * Otherwise the recovery method of last resort for userspace is 1088 * disable and re-enable the parent region. 
1089 */ 1090 alloc = 0; 1091 for_each_dpa_resource(ndd, res) { 1092 if (strcmp(res->name, label_id.id) != 0) 1093 continue; 1094 if (!is_old_resource(res, old_res_list, old_num_resources)) 1095 alloc++; 1096 } 1097 1098 victims = 0; 1099 if (old_num_resources) { 1100 /* convert old local-label-map to dimm-slot victim-map */ 1101 victim_map = bitmap_zalloc(nslot, GFP_KERNEL); 1102 if (!victim_map) 1103 return -ENOMEM; 1104 1105 /* mark unused labels for garbage collection */ 1106 for_each_clear_bit_le(slot, free, nslot) { 1107 nd_label = to_label(ndd, slot); 1108 if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid)) 1109 continue; 1110 res = to_resource(ndd, nd_label); 1111 if (res && is_old_resource(res, old_res_list, 1112 old_num_resources)) 1113 continue; 1114 slot = to_slot(ndd, nd_label); 1115 set_bit(slot, victim_map); 1116 victims++; 1117 } 1118 } 1119 1120 /* don't allow updates that consume the last label */ 1121 if (nfree - alloc < 0 || nfree - alloc + victims < 1) { 1122 dev_info(&nsblk->common.dev, "insufficient label space\n"); 1123 bitmap_free(victim_map); 1124 return -ENOSPC; 1125 } 1126 /* from here on we need to abort on error */ 1127 1128 1129 /* assign all resources to the namespace before writing the labels */ 1130 nsblk->res = NULL; 1131 nsblk->num_resources = 0; 1132 for_each_dpa_resource(ndd, res) { 1133 if (strcmp(res->name, label_id.id) != 0) 1134 continue; 1135 if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) { 1136 rc = -ENOMEM; 1137 goto abort; 1138 } 1139 } 1140 1141 /* release slots associated with any invalidated UUIDs */ 1142 mutex_lock(&nd_mapping->lock); 1143 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) 1144 if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) { 1145 reap_victim(nd_mapping, label_ent); 1146 list_move(&label_ent->list, &list); 1147 } 1148 mutex_unlock(&nd_mapping->lock); 1149 1150 /* 1151 * Find the resource associated with the first label in the set 1152 * per the v1.2 namespace specification. 
1153 */ 1154 for (i = 0; i < nsblk->num_resources; i++) { 1155 struct resource *min = nsblk->res[min_dpa_idx]; 1156 1157 res = nsblk->res[i]; 1158 if (res->start < min->start) 1159 min_dpa_idx = i; 1160 } 1161 1162 for (i = 0; i < nsblk->num_resources; i++) { 1163 size_t offset; 1164 1165 res = nsblk->res[i]; 1166 if (is_old_resource(res, old_res_list, old_num_resources)) 1167 continue; /* carry-over */ 1168 slot = nd_label_alloc_slot(ndd); 1169 if (slot == UINT_MAX) { 1170 rc = -ENXIO; 1171 goto abort; 1172 } 1173 dev_dbg(ndd->dev, "allocated: %d\n", slot); 1174 1175 nd_label = to_label(ndd, slot); 1176 memset(nd_label, 0, sizeof_namespace_label(ndd)); 1177 nsl_set_uuid(ndd, nd_label, nsblk->uuid); 1178 nsl_set_name(ndd, nd_label, nsblk->alt_name); 1179 nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL); 1180 1181 nsl_set_blk_nlabel(ndd, nd_label, nsblk->num_resources, 1182 i == min_dpa_idx); 1183 nsl_set_blk_position(ndd, nd_label, i == min_dpa_idx); 1184 nsl_set_blk_isetcookie(ndd, nd_label, nd_set->cookie2); 1185 1186 nsl_set_dpa(ndd, nd_label, res->start); 1187 nsl_set_rawsize(ndd, nd_label, resource_size(res)); 1188 nsl_set_lbasize(ndd, nd_label, nsblk->lbasize); 1189 nsl_set_slot(ndd, nd_label, slot); 1190 nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid); 1191 nsl_set_claim_class(ndd, nd_label, ndns->claim_class); 1192 nsl_calculate_checksum(ndd, nd_label); 1193 1194 /* update label */ 1195 offset = nd_label_offset(ndd, nd_label); 1196 rc = nvdimm_set_config_data(ndd, offset, nd_label, 1197 sizeof_namespace_label(ndd)); 1198 if (rc < 0) 1199 goto abort; 1200 } 1201 1202 /* free up now unused slots in the new index */ 1203 for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) { 1204 dev_dbg(ndd->dev, "free: %d\n", slot); 1205 nd_label_free_slot(ndd, slot); 1206 } 1207 1208 /* update index */ 1209 rc = nd_label_write_index(ndd, ndd->ns_next, 1210 nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0); 1211 if (rc) 1212 goto abort; 1213 1214 /* 1215 * Now that the on-dimm labels are up to date, fix up the tracking 1216 * entries in nd_mapping->labels 1217 */ 1218 nlabel = 0; 1219 mutex_lock(&nd_mapping->lock); 1220 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { 1221 nd_label = label_ent->label; 1222 if (!nd_label) 1223 continue; 1224 nlabel++; 1225 if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid)) 1226 continue; 1227 nlabel--; 1228 list_move(&label_ent->list, &list); 1229 label_ent->label = NULL; 1230 } 1231 list_splice_tail_init(&list, &nd_mapping->labels); 1232 mutex_unlock(&nd_mapping->lock); 1233 1234 if (nlabel + nsblk->num_resources > num_labels) { 1235 /* 1236 * Bug, we can't end up with more resources than 1237 * available labels 1238 */ 1239 WARN_ON_ONCE(1); 1240 rc = -ENXIO; 1241 goto out; 1242 } 1243 1244 mutex_lock(&nd_mapping->lock); 1245 label_ent = list_first_entry_or_null(&nd_mapping->labels, 1246 typeof(*label_ent), list); 1247 if (!label_ent) { 1248 WARN_ON(1); 1249 mutex_unlock(&nd_mapping->lock); 1250 rc = -ENXIO; 1251 goto out; 1252 } 1253 for_each_clear_bit_le(slot, free, nslot) { 1254 nd_label = to_label(ndd, slot); 1255 if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid)) 1256 continue; 1257 res = to_resource(ndd, nd_label); 1258 res->flags &= ~DPA_RESOURCE_ADJUSTED; 1259 dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot); 1260 list_for_each_entry_from(label_ent, &nd_mapping->labels, list) { 1261 if (label_ent->label) 1262 continue; 1263 label_ent->label = nd_label; 1264 nd_label = NULL; 1265 break; 1266 } 1267 if (nd_label) 1268 
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

out:
	kfree(old_res_list);
	bitmap_free(victim_map);
	return rc;

abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}

static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

static int del_labels(struct nd_mapping *nd_mapping, uuid_t *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		if (!nsl_uuid_equal(ndd, nd_label, uuid))
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}

int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	WARN_ON(uuid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_uuid));
	WARN_ON(uuid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_uuid));
	WARN_ON(uuid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_uuid));
	WARN_ON(uuid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_uuid));

	WARN_ON(uuid_parse(CXL_REGION_UUID, &cxl_region_uuid));
	WARN_ON(uuid_parse(CXL_NAMESPACE_UUID, &cxl_namespace_uuid));

	return 0;
}