1 /* 2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of version 2 of the GNU General Public License as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #include <linux/device.h> 14 #include <linux/ndctl.h> 15 #include <linux/slab.h> 16 #include <linux/io.h> 17 #include <linux/nd.h> 18 #include "nd-core.h" 19 #include "label.h" 20 #include "nd.h" 21 22 static u32 best_seq(u32 a, u32 b) 23 { 24 a &= NSINDEX_SEQ_MASK; 25 b &= NSINDEX_SEQ_MASK; 26 27 if (a == 0 || a == b) 28 return b; 29 else if (b == 0) 30 return a; 31 else if (nd_inc_seq(a) == b) 32 return b; 33 else 34 return a; 35 } 36 37 size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd) 38 { 39 u32 index_span; 40 41 if (ndd->nsindex_size) 42 return ndd->nsindex_size; 43 44 /* 45 * The minimum index space is 512 bytes, with that amount of 46 * index we can describe ~1400 labels which is less than a byte 47 * of overhead per label. Round up to a byte of overhead per 48 * label and determine the size of the index region. Yes, this 49 * starts to waste space at larger config_sizes, but it's 50 * unlikely we'll ever see anything but 128K. 51 */ 52 index_span = ndd->nsarea.config_size / 129; 53 index_span /= NSINDEX_ALIGN * 2; 54 ndd->nsindex_size = index_span * NSINDEX_ALIGN; 55 56 return ndd->nsindex_size; 57 } 58 59 int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd) 60 { 61 return ndd->nsarea.config_size / 129; 62 } 63 64 int nd_label_validate(struct nvdimm_drvdata *ndd) 65 { 66 /* 67 * On media label format consists of two index blocks followed 68 * by an array of labels. None of these structures are ever 69 * updated in place. A sequence number tracks the current 70 * active index and the next one to write, while labels are 71 * written to free slots. 72 * 73 * +------------+ 74 * | | 75 * | nsindex0 | 76 * | | 77 * +------------+ 78 * | | 79 * | nsindex1 | 80 * | | 81 * +------------+ 82 * | label0 | 83 * +------------+ 84 * | label1 | 85 * +------------+ 86 * | | 87 * ....nslot... 88 * | | 89 * +------------+ 90 * | labelN | 91 * +------------+ 92 */ 93 struct nd_namespace_index *nsindex[] = { 94 to_namespace_index(ndd, 0), 95 to_namespace_index(ndd, 1), 96 }; 97 const int num_index = ARRAY_SIZE(nsindex); 98 struct device *dev = ndd->dev; 99 bool valid[2] = { 0 }; 100 int i, num_valid = 0; 101 u32 seq; 102 103 for (i = 0; i < num_index; i++) { 104 u32 nslot; 105 u8 sig[NSINDEX_SIG_LEN]; 106 u64 sum_save, sum, size; 107 108 memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN); 109 if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) { 110 dev_dbg(dev, "%s: nsindex%d signature invalid\n", 111 __func__, i); 112 continue; 113 } 114 sum_save = __le64_to_cpu(nsindex[i]->checksum); 115 nsindex[i]->checksum = __cpu_to_le64(0); 116 sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1); 117 nsindex[i]->checksum = __cpu_to_le64(sum_save); 118 if (sum != sum_save) { 119 dev_dbg(dev, "%s: nsindex%d checksum invalid\n", 120 __func__, i); 121 continue; 122 } 123 124 seq = __le32_to_cpu(nsindex[i]->seq); 125 if ((seq & NSINDEX_SEQ_MASK) == 0) { 126 dev_dbg(dev, "%s: nsindex%d sequence: %#x invalid\n", 127 __func__, i, seq); 128 continue; 129 } 130 131 /* sanity check the index against expected values */ 132 if (__le64_to_cpu(nsindex[i]->myoff) 133 != i * sizeof_namespace_index(ndd)) { 134 dev_dbg(dev, "%s: nsindex%d myoff: %#llx invalid\n", 135 __func__, i, (unsigned long long) 136 __le64_to_cpu(nsindex[i]->myoff)); 137 continue; 138 } 139 if (__le64_to_cpu(nsindex[i]->otheroff) 140 != (!i) * sizeof_namespace_index(ndd)) { 141 dev_dbg(dev, "%s: nsindex%d otheroff: %#llx invalid\n", 142 __func__, i, (unsigned long long) 143 __le64_to_cpu(nsindex[i]->otheroff)); 144 continue; 145 } 146 147 size = __le64_to_cpu(nsindex[i]->mysize); 148 if (size > sizeof_namespace_index(ndd) 149 || size < sizeof(struct nd_namespace_index)) { 150 dev_dbg(dev, "%s: nsindex%d mysize: %#llx invalid\n", 151 __func__, i, size); 152 continue; 153 } 154 155 nslot = __le32_to_cpu(nsindex[i]->nslot); 156 if (nslot * sizeof(struct nd_namespace_label) 157 + 2 * sizeof_namespace_index(ndd) 158 > ndd->nsarea.config_size) { 159 dev_dbg(dev, "%s: nsindex%d nslot: %u invalid, config_size: %#x\n", 160 __func__, i, nslot, 161 ndd->nsarea.config_size); 162 continue; 163 } 164 valid[i] = true; 165 num_valid++; 166 } 167 168 switch (num_valid) { 169 case 0: 170 break; 171 case 1: 172 for (i = 0; i < num_index; i++) 173 if (valid[i]) 174 return i; 175 /* can't have num_valid > 0 but valid[] = { false, false } */ 176 WARN_ON(1); 177 break; 178 default: 179 /* pick the best index... */ 180 seq = best_seq(__le32_to_cpu(nsindex[0]->seq), 181 __le32_to_cpu(nsindex[1]->seq)); 182 if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK)) 183 return 1; 184 else 185 return 0; 186 break; 187 } 188 189 return -1; 190 } 191 192 void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst, 193 struct nd_namespace_index *src) 194 { 195 if (dst && src) 196 /* pass */; 197 else 198 return; 199 200 memcpy(dst, src, sizeof_namespace_index(ndd)); 201 } 202 203 static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd) 204 { 205 void *base = to_namespace_index(ndd, 0); 206 207 return base + 2 * sizeof_namespace_index(ndd); 208 } 209 210 static int to_slot(struct nvdimm_drvdata *ndd, 211 struct nd_namespace_label *nd_label) 212 { 213 return nd_label - nd_label_base(ndd); 214 } 215 216 #define for_each_clear_bit_le(bit, addr, size) \ 217 for ((bit) = find_next_zero_bit_le((addr), (size), 0); \ 218 (bit) < (size); \ 219 (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1)) 220 221 /** 222 * preamble_index - common variable initialization for nd_label_* routines 223 * @ndd: dimm container for the relevant label set 224 * @idx: namespace_index index 225 * @nsindex_out: on return set to the currently active namespace index 226 * @free: on return set to the free label bitmap in the index 227 * @nslot: on return set to the number of slots in the label space 228 */ 229 static bool preamble_index(struct nvdimm_drvdata *ndd, int idx, 230 struct nd_namespace_index **nsindex_out, 231 unsigned long **free, u32 *nslot) 232 { 233 struct nd_namespace_index *nsindex; 234 235 nsindex = to_namespace_index(ndd, idx); 236 if (nsindex == NULL) 237 return false; 238 239 *free = (unsigned long *) nsindex->free; 240 *nslot = __le32_to_cpu(nsindex->nslot); 241 *nsindex_out = nsindex; 242 243 return true; 244 } 245 246 char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags) 247 { 248 if (!label_id || !uuid) 249 return NULL; 250 snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb", 251 flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid); 252 return label_id->id; 253 } 254 255 static bool preamble_current(struct nvdimm_drvdata *ndd, 256 struct nd_namespace_index **nsindex, 257 unsigned long **free, u32 *nslot) 258 { 259 return preamble_index(ndd, ndd->ns_current, nsindex, 260 free, nslot); 261 } 262 263 static bool preamble_next(struct nvdimm_drvdata *ndd, 264 struct nd_namespace_index **nsindex, 265 unsigned long **free, u32 *nslot) 266 { 267 return preamble_index(ndd, ndd->ns_next, nsindex, 268 free, nslot); 269 } 270 271 static bool slot_valid(struct nd_namespace_label *nd_label, u32 slot) 272 { 273 /* check that we are written where we expect to be written */ 274 if (slot != __le32_to_cpu(nd_label->slot)) 275 return false; 276 277 /* check that DPA allocations are page aligned */ 278 if ((__le64_to_cpu(nd_label->dpa) 279 | __le64_to_cpu(nd_label->rawsize)) % SZ_4K) 280 return false; 281 282 return true; 283 } 284 285 int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) 286 { 287 struct nd_namespace_index *nsindex; 288 unsigned long *free; 289 u32 nslot, slot; 290 291 if (!preamble_current(ndd, &nsindex, &free, &nslot)) 292 return 0; /* no label, nothing to reserve */ 293 294 for_each_clear_bit_le(slot, free, nslot) { 295 struct nd_namespace_label *nd_label; 296 struct nd_region *nd_region = NULL; 297 u8 label_uuid[NSLABEL_UUID_LEN]; 298 struct nd_label_id label_id; 299 struct resource *res; 300 u32 flags; 301 302 nd_label = nd_label_base(ndd) + slot; 303 304 if (!slot_valid(nd_label, slot)) 305 continue; 306 307 memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN); 308 flags = __le32_to_cpu(nd_label->flags); 309 nd_label_gen_id(&label_id, label_uuid, flags); 310 res = nvdimm_allocate_dpa(ndd, &label_id, 311 __le64_to_cpu(nd_label->dpa), 312 __le64_to_cpu(nd_label->rawsize)); 313 nd_dbg_dpa(nd_region, ndd, res, "reserve\n"); 314 if (!res) 315 return -EBUSY; 316 } 317 318 return 0; 319 } 320 321 int nd_label_active_count(struct nvdimm_drvdata *ndd) 322 { 323 struct nd_namespace_index *nsindex; 324 unsigned long *free; 325 u32 nslot, slot; 326 int count = 0; 327 328 if (!preamble_current(ndd, &nsindex, &free, &nslot)) 329 return 0; 330 331 for_each_clear_bit_le(slot, free, nslot) { 332 struct nd_namespace_label *nd_label; 333 334 nd_label = nd_label_base(ndd) + slot; 335 336 if (!slot_valid(nd_label, slot)) { 337 u32 label_slot = __le32_to_cpu(nd_label->slot); 338 u64 size = __le64_to_cpu(nd_label->rawsize); 339 u64 dpa = __le64_to_cpu(nd_label->dpa); 340 341 dev_dbg(ndd->dev, 342 "%s: slot%d invalid slot: %d dpa: %llx size: %llx\n", 343 __func__, slot, label_slot, dpa, size); 344 continue; 345 } 346 count++; 347 } 348 return count; 349 } 350 351 struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n) 352 { 353 struct nd_namespace_index *nsindex; 354 unsigned long *free; 355 u32 nslot, slot; 356 357 if (!preamble_current(ndd, &nsindex, &free, &nslot)) 358 return NULL; 359 360 for_each_clear_bit_le(slot, free, nslot) { 361 struct nd_namespace_label *nd_label; 362 363 nd_label = nd_label_base(ndd) + slot; 364 if (!slot_valid(nd_label, slot)) 365 continue; 366 367 if (n-- == 0) 368 return nd_label_base(ndd) + slot; 369 } 370 371 return NULL; 372 } 373 374 u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd) 375 { 376 struct nd_namespace_index *nsindex; 377 unsigned long *free; 378 u32 nslot, slot; 379 380 if (!preamble_next(ndd, &nsindex, &free, &nslot)) 381 return UINT_MAX; 382 383 WARN_ON(!is_nvdimm_bus_locked(ndd->dev)); 384 385 slot = find_next_bit_le(free, nslot, 0); 386 if (slot == nslot) 387 return UINT_MAX; 388 389 clear_bit_le(slot, free); 390 391 return slot; 392 } 393 394 bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot) 395 { 396 struct nd_namespace_index *nsindex; 397 unsigned long *free; 398 u32 nslot; 399 400 if (!preamble_next(ndd, &nsindex, &free, &nslot)) 401 return false; 402 403 WARN_ON(!is_nvdimm_bus_locked(ndd->dev)); 404 405 if (slot < nslot) 406 return !test_and_set_bit_le(slot, free); 407 return false; 408 } 409 410 u32 nd_label_nfree(struct nvdimm_drvdata *ndd) 411 { 412 struct nd_namespace_index *nsindex; 413 unsigned long *free; 414 u32 nslot; 415 416 WARN_ON(!is_nvdimm_bus_locked(ndd->dev)); 417 418 if (!preamble_next(ndd, &nsindex, &free, &nslot)) 419 return nvdimm_num_label_slots(ndd); 420 421 return bitmap_weight(free, nslot); 422 } 423 424 static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq, 425 unsigned long flags) 426 { 427 struct nd_namespace_index *nsindex; 428 unsigned long offset; 429 u64 checksum; 430 u32 nslot; 431 int rc; 432 433 nsindex = to_namespace_index(ndd, index); 434 if (flags & ND_NSINDEX_INIT) 435 nslot = nvdimm_num_label_slots(ndd); 436 else 437 nslot = __le32_to_cpu(nsindex->nslot); 438 439 memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN); 440 nsindex->flags = __cpu_to_le32(0); 441 nsindex->seq = __cpu_to_le32(seq); 442 offset = (unsigned long) nsindex 443 - (unsigned long) to_namespace_index(ndd, 0); 444 nsindex->myoff = __cpu_to_le64(offset); 445 nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd)); 446 offset = (unsigned long) to_namespace_index(ndd, 447 nd_label_next_nsindex(index)) 448 - (unsigned long) to_namespace_index(ndd, 0); 449 nsindex->otheroff = __cpu_to_le64(offset); 450 offset = (unsigned long) nd_label_base(ndd) 451 - (unsigned long) to_namespace_index(ndd, 0); 452 nsindex->labeloff = __cpu_to_le64(offset); 453 nsindex->nslot = __cpu_to_le32(nslot); 454 nsindex->major = __cpu_to_le16(1); 455 nsindex->minor = __cpu_to_le16(1); 456 nsindex->checksum = __cpu_to_le64(0); 457 if (flags & ND_NSINDEX_INIT) { 458 unsigned long *free = (unsigned long *) nsindex->free; 459 u32 nfree = ALIGN(nslot, BITS_PER_LONG); 460 int last_bits, i; 461 462 memset(nsindex->free, 0xff, nfree / 8); 463 for (i = 0, last_bits = nfree - nslot; i < last_bits; i++) 464 clear_bit_le(nslot + i, free); 465 } 466 checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1); 467 nsindex->checksum = __cpu_to_le64(checksum); 468 rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff), 469 nsindex, sizeof_namespace_index(ndd)); 470 if (rc < 0) 471 return rc; 472 473 if (flags & ND_NSINDEX_INIT) 474 return 0; 475 476 /* copy the index we just wrote to the new 'next' */ 477 WARN_ON(index != ndd->ns_next); 478 nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex); 479 ndd->ns_current = nd_label_next_nsindex(ndd->ns_current); 480 ndd->ns_next = nd_label_next_nsindex(ndd->ns_next); 481 WARN_ON(ndd->ns_current == ndd->ns_next); 482 483 return 0; 484 } 485 486 static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd, 487 struct nd_namespace_label *nd_label) 488 { 489 return (unsigned long) nd_label 490 - (unsigned long) to_namespace_index(ndd, 0); 491 } 492 493 static int __pmem_label_update(struct nd_region *nd_region, 494 struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, 495 int pos) 496 { 497 u64 cookie = nd_region_interleave_set_cookie(nd_region); 498 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 499 struct nd_label_ent *label_ent, *victim = NULL; 500 struct nd_namespace_label *nd_label; 501 struct nd_namespace_index *nsindex; 502 struct nd_label_id label_id; 503 struct resource *res; 504 unsigned long *free; 505 u32 nslot, slot; 506 size_t offset; 507 int rc; 508 509 if (!preamble_next(ndd, &nsindex, &free, &nslot)) 510 return -ENXIO; 511 512 nd_label_gen_id(&label_id, nspm->uuid, 0); 513 for_each_dpa_resource(ndd, res) 514 if (strcmp(res->name, label_id.id) == 0) 515 break; 516 517 if (!res) { 518 WARN_ON_ONCE(1); 519 return -ENXIO; 520 } 521 522 /* allocate and write the label to the staging (next) index */ 523 slot = nd_label_alloc_slot(ndd); 524 if (slot == UINT_MAX) 525 return -ENXIO; 526 dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot); 527 528 nd_label = nd_label_base(ndd) + slot; 529 memset(nd_label, 0, sizeof(struct nd_namespace_label)); 530 memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN); 531 if (nspm->alt_name) 532 memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN); 533 nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING); 534 nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings); 535 nd_label->position = __cpu_to_le16(pos); 536 nd_label->isetcookie = __cpu_to_le64(cookie); 537 nd_label->rawsize = __cpu_to_le64(resource_size(res)); 538 nd_label->dpa = __cpu_to_le64(res->start); 539 nd_label->slot = __cpu_to_le32(slot); 540 nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__); 541 542 /* update label */ 543 offset = nd_label_offset(ndd, nd_label); 544 rc = nvdimm_set_config_data(ndd, offset, nd_label, 545 sizeof(struct nd_namespace_label)); 546 if (rc < 0) 547 return rc; 548 549 /* Garbage collect the previous label */ 550 mutex_lock(&nd_mapping->lock); 551 list_for_each_entry(label_ent, &nd_mapping->labels, list) { 552 if (!label_ent->label) 553 continue; 554 if (memcmp(nspm->uuid, label_ent->label->uuid, 555 NSLABEL_UUID_LEN) != 0) 556 continue; 557 victim = label_ent; 558 list_move_tail(&victim->list, &nd_mapping->labels); 559 break; 560 } 561 if (victim) { 562 dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); 563 slot = to_slot(ndd, victim->label); 564 nd_label_free_slot(ndd, slot); 565 victim->label = NULL; 566 } 567 568 /* update index */ 569 rc = nd_label_write_index(ndd, ndd->ns_next, 570 nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0); 571 if (rc == 0) { 572 list_for_each_entry(label_ent, &nd_mapping->labels, list) 573 if (!label_ent->label) { 574 label_ent->label = nd_label; 575 nd_label = NULL; 576 break; 577 } 578 dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label, 579 "failed to track label: %d\n", 580 to_slot(ndd, nd_label)); 581 if (nd_label) 582 rc = -ENXIO; 583 } 584 mutex_unlock(&nd_mapping->lock); 585 586 return rc; 587 } 588 589 static bool is_old_resource(struct resource *res, struct resource **list, int n) 590 { 591 int i; 592 593 if (res->flags & DPA_RESOURCE_ADJUSTED) 594 return false; 595 for (i = 0; i < n; i++) 596 if (res == list[i]) 597 return true; 598 return false; 599 } 600 601 static struct resource *to_resource(struct nvdimm_drvdata *ndd, 602 struct nd_namespace_label *nd_label) 603 { 604 struct resource *res; 605 606 for_each_dpa_resource(ndd, res) { 607 if (res->start != __le64_to_cpu(nd_label->dpa)) 608 continue; 609 if (resource_size(res) != __le64_to_cpu(nd_label->rawsize)) 610 continue; 611 return res; 612 } 613 614 return NULL; 615 } 616 617 /* 618 * 1/ Account all the labels that can be freed after this update 619 * 2/ Allocate and write the label to the staging (next) index 620 * 3/ Record the resources in the namespace device 621 */ 622 static int __blk_label_update(struct nd_region *nd_region, 623 struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk, 624 int num_labels) 625 { 626 int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO; 627 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 628 struct nd_namespace_label *nd_label; 629 struct nd_label_ent *label_ent, *e; 630 struct nd_namespace_index *nsindex; 631 unsigned long *free, *victim_map = NULL; 632 struct resource *res, **old_res_list; 633 struct nd_label_id label_id; 634 u8 uuid[NSLABEL_UUID_LEN]; 635 LIST_HEAD(list); 636 u32 nslot, slot; 637 638 if (!preamble_next(ndd, &nsindex, &free, &nslot)) 639 return -ENXIO; 640 641 old_res_list = nsblk->res; 642 nfree = nd_label_nfree(ndd); 643 old_num_resources = nsblk->num_resources; 644 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); 645 646 /* 647 * We need to loop over the old resources a few times, which seems a 648 * bit inefficient, but we need to know that we have the label 649 * space before we start mutating the tracking structures. 650 * Otherwise the recovery method of last resort for userspace is 651 * disable and re-enable the parent region. 652 */ 653 alloc = 0; 654 for_each_dpa_resource(ndd, res) { 655 if (strcmp(res->name, label_id.id) != 0) 656 continue; 657 if (!is_old_resource(res, old_res_list, old_num_resources)) 658 alloc++; 659 } 660 661 victims = 0; 662 if (old_num_resources) { 663 /* convert old local-label-map to dimm-slot victim-map */ 664 victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long), 665 GFP_KERNEL); 666 if (!victim_map) 667 return -ENOMEM; 668 669 /* mark unused labels for garbage collection */ 670 for_each_clear_bit_le(slot, free, nslot) { 671 nd_label = nd_label_base(ndd) + slot; 672 memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN); 673 if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0) 674 continue; 675 res = to_resource(ndd, nd_label); 676 if (res && is_old_resource(res, old_res_list, 677 old_num_resources)) 678 continue; 679 slot = to_slot(ndd, nd_label); 680 set_bit(slot, victim_map); 681 victims++; 682 } 683 } 684 685 /* don't allow updates that consume the last label */ 686 if (nfree - alloc < 0 || nfree - alloc + victims < 1) { 687 dev_info(&nsblk->common.dev, "insufficient label space\n"); 688 kfree(victim_map); 689 return -ENOSPC; 690 } 691 /* from here on we need to abort on error */ 692 693 694 /* assign all resources to the namespace before writing the labels */ 695 nsblk->res = NULL; 696 nsblk->num_resources = 0; 697 for_each_dpa_resource(ndd, res) { 698 if (strcmp(res->name, label_id.id) != 0) 699 continue; 700 if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) { 701 rc = -ENOMEM; 702 goto abort; 703 } 704 } 705 706 for (i = 0; i < nsblk->num_resources; i++) { 707 size_t offset; 708 709 res = nsblk->res[i]; 710 if (is_old_resource(res, old_res_list, old_num_resources)) 711 continue; /* carry-over */ 712 slot = nd_label_alloc_slot(ndd); 713 if (slot == UINT_MAX) 714 goto abort; 715 dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot); 716 717 nd_label = nd_label_base(ndd) + slot; 718 memset(nd_label, 0, sizeof(struct nd_namespace_label)); 719 memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN); 720 if (nsblk->alt_name) 721 memcpy(nd_label->name, nsblk->alt_name, 722 NSLABEL_NAME_LEN); 723 nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL); 724 nd_label->nlabel = __cpu_to_le16(0); /* N/A */ 725 nd_label->position = __cpu_to_le16(0); /* N/A */ 726 nd_label->isetcookie = __cpu_to_le64(0); /* N/A */ 727 nd_label->dpa = __cpu_to_le64(res->start); 728 nd_label->rawsize = __cpu_to_le64(resource_size(res)); 729 nd_label->lbasize = __cpu_to_le64(nsblk->lbasize); 730 nd_label->slot = __cpu_to_le32(slot); 731 732 /* update label */ 733 offset = nd_label_offset(ndd, nd_label); 734 rc = nvdimm_set_config_data(ndd, offset, nd_label, 735 sizeof(struct nd_namespace_label)); 736 if (rc < 0) 737 goto abort; 738 } 739 740 /* free up now unused slots in the new index */ 741 for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) { 742 dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); 743 nd_label_free_slot(ndd, slot); 744 } 745 746 /* update index */ 747 rc = nd_label_write_index(ndd, ndd->ns_next, 748 nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0); 749 if (rc) 750 goto abort; 751 752 /* 753 * Now that the on-dimm labels are up to date, fix up the tracking 754 * entries in nd_mapping->labels 755 */ 756 nlabel = 0; 757 mutex_lock(&nd_mapping->lock); 758 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { 759 nd_label = label_ent->label; 760 if (!nd_label) 761 continue; 762 nlabel++; 763 memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN); 764 if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0) 765 continue; 766 nlabel--; 767 list_move(&label_ent->list, &list); 768 label_ent->label = NULL; 769 } 770 list_splice_tail_init(&list, &nd_mapping->labels); 771 mutex_unlock(&nd_mapping->lock); 772 773 if (nlabel + nsblk->num_resources > num_labels) { 774 /* 775 * Bug, we can't end up with more resources than 776 * available labels 777 */ 778 WARN_ON_ONCE(1); 779 rc = -ENXIO; 780 goto out; 781 } 782 783 mutex_lock(&nd_mapping->lock); 784 label_ent = list_first_entry_or_null(&nd_mapping->labels, 785 typeof(*label_ent), list); 786 if (!label_ent) { 787 WARN_ON(1); 788 mutex_unlock(&nd_mapping->lock); 789 rc = -ENXIO; 790 goto out; 791 } 792 for_each_clear_bit_le(slot, free, nslot) { 793 nd_label = nd_label_base(ndd) + slot; 794 memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN); 795 if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0) 796 continue; 797 res = to_resource(ndd, nd_label); 798 res->flags &= ~DPA_RESOURCE_ADJUSTED; 799 dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot); 800 list_for_each_entry_from(label_ent, &nd_mapping->labels, list) { 801 if (label_ent->label) 802 continue; 803 label_ent->label = nd_label; 804 nd_label = NULL; 805 break; 806 } 807 if (nd_label) 808 dev_WARN(&nsblk->common.dev, 809 "failed to track label slot%d\n", slot); 810 } 811 mutex_unlock(&nd_mapping->lock); 812 813 out: 814 kfree(old_res_list); 815 kfree(victim_map); 816 return rc; 817 818 abort: 819 /* 820 * 1/ repair the allocated label bitmap in the index 821 * 2/ restore the resource list 822 */ 823 nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd)); 824 kfree(nsblk->res); 825 nsblk->res = old_res_list; 826 nsblk->num_resources = old_num_resources; 827 old_res_list = NULL; 828 goto out; 829 } 830 831 static int init_labels(struct nd_mapping *nd_mapping, int num_labels) 832 { 833 int i, old_num_labels = 0; 834 struct nd_label_ent *label_ent; 835 struct nd_namespace_index *nsindex; 836 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 837 838 mutex_lock(&nd_mapping->lock); 839 list_for_each_entry(label_ent, &nd_mapping->labels, list) 840 old_num_labels++; 841 mutex_unlock(&nd_mapping->lock); 842 843 /* 844 * We need to preserve all the old labels for the mapping so 845 * they can be garbage collected after writing the new labels. 846 */ 847 for (i = old_num_labels; i < num_labels; i++) { 848 label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL); 849 if (!label_ent) 850 return -ENOMEM; 851 mutex_lock(&nd_mapping->lock); 852 list_add_tail(&label_ent->list, &nd_mapping->labels); 853 mutex_unlock(&nd_mapping->lock); 854 } 855 856 if (ndd->ns_current == -1 || ndd->ns_next == -1) 857 /* pass */; 858 else 859 return max(num_labels, old_num_labels); 860 861 nsindex = to_namespace_index(ndd, 0); 862 memset(nsindex, 0, ndd->nsarea.config_size); 863 for (i = 0; i < 2; i++) { 864 int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT); 865 866 if (rc) 867 return rc; 868 } 869 ndd->ns_next = 1; 870 ndd->ns_current = 0; 871 872 return max(num_labels, old_num_labels); 873 } 874 875 static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid) 876 { 877 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 878 struct nd_label_ent *label_ent, *e; 879 struct nd_namespace_index *nsindex; 880 u8 label_uuid[NSLABEL_UUID_LEN]; 881 unsigned long *free; 882 LIST_HEAD(list); 883 u32 nslot, slot; 884 int active = 0; 885 886 if (!uuid) 887 return 0; 888 889 /* no index || no labels == nothing to delete */ 890 if (!preamble_next(ndd, &nsindex, &free, &nslot)) 891 return 0; 892 893 mutex_lock(&nd_mapping->lock); 894 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { 895 struct nd_namespace_label *nd_label = label_ent->label; 896 897 if (!nd_label) 898 continue; 899 active++; 900 memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN); 901 if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0) 902 continue; 903 active--; 904 slot = to_slot(ndd, nd_label); 905 nd_label_free_slot(ndd, slot); 906 dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); 907 list_move_tail(&label_ent->list, &list); 908 label_ent->label = NULL; 909 } 910 list_splice_tail_init(&list, &nd_mapping->labels); 911 912 if (active == 0) { 913 nd_mapping_free_labels(nd_mapping); 914 dev_dbg(ndd->dev, "%s: no more active labels\n", __func__); 915 } 916 mutex_unlock(&nd_mapping->lock); 917 918 return nd_label_write_index(ndd, ndd->ns_next, 919 nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0); 920 } 921 922 int nd_pmem_namespace_label_update(struct nd_region *nd_region, 923 struct nd_namespace_pmem *nspm, resource_size_t size) 924 { 925 int i; 926 927 for (i = 0; i < nd_region->ndr_mappings; i++) { 928 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 929 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 930 struct resource *res; 931 int rc, count = 0; 932 933 if (size == 0) { 934 rc = del_labels(nd_mapping, nspm->uuid); 935 if (rc) 936 return rc; 937 continue; 938 } 939 940 for_each_dpa_resource(ndd, res) 941 if (strncmp(res->name, "pmem", 4) == 0) 942 count++; 943 WARN_ON_ONCE(!count); 944 945 rc = init_labels(nd_mapping, count); 946 if (rc < 0) 947 return rc; 948 949 rc = __pmem_label_update(nd_region, nd_mapping, nspm, i); 950 if (rc) 951 return rc; 952 } 953 954 return 0; 955 } 956 957 int nd_blk_namespace_label_update(struct nd_region *nd_region, 958 struct nd_namespace_blk *nsblk, resource_size_t size) 959 { 960 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 961 struct resource *res; 962 int count = 0; 963 964 if (size == 0) 965 return del_labels(nd_mapping, nsblk->uuid); 966 967 for_each_dpa_resource(to_ndd(nd_mapping), res) 968 count++; 969 970 count = init_labels(nd_mapping, count); 971 if (count < 0) 972 return count; 973 974 return __blk_label_update(nd_region, nd_mapping, nsblk, count); 975 } 976