1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 4 */ 5 #include <linux/module.h> 6 #include <linux/device.h> 7 #include <linux/sort.h> 8 #include <linux/slab.h> 9 #include <linux/list.h> 10 #include <linux/nd.h> 11 #include "nd-core.h" 12 #include "pmem.h" 13 #include "pfn.h" 14 #include "nd.h" 15 16 static void namespace_io_release(struct device *dev) 17 { 18 struct nd_namespace_io *nsio = to_nd_namespace_io(dev); 19 20 kfree(nsio); 21 } 22 23 static void namespace_pmem_release(struct device *dev) 24 { 25 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 26 struct nd_region *nd_region = to_nd_region(dev->parent); 27 28 if (nspm->id >= 0) 29 ida_simple_remove(&nd_region->ns_ida, nspm->id); 30 kfree(nspm->alt_name); 31 kfree(nspm->uuid); 32 kfree(nspm); 33 } 34 35 static bool is_namespace_pmem(const struct device *dev); 36 static bool is_namespace_io(const struct device *dev); 37 38 static int is_uuid_busy(struct device *dev, void *data) 39 { 40 uuid_t *uuid1 = data, *uuid2 = NULL; 41 42 if (is_namespace_pmem(dev)) { 43 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 44 45 uuid2 = nspm->uuid; 46 } else if (is_nd_btt(dev)) { 47 struct nd_btt *nd_btt = to_nd_btt(dev); 48 49 uuid2 = nd_btt->uuid; 50 } else if (is_nd_pfn(dev)) { 51 struct nd_pfn *nd_pfn = to_nd_pfn(dev); 52 53 uuid2 = nd_pfn->uuid; 54 } 55 56 if (uuid2 && uuid_equal(uuid1, uuid2)) 57 return -EBUSY; 58 59 return 0; 60 } 61 62 static int is_namespace_uuid_busy(struct device *dev, void *data) 63 { 64 if (is_nd_region(dev)) 65 return device_for_each_child(dev, data, is_uuid_busy); 66 return 0; 67 } 68 69 /** 70 * nd_is_uuid_unique - verify that no other namespace has @uuid 71 * @dev: any device on a nvdimm_bus 72 * @uuid: uuid to check 73 */ 74 bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid) 75 { 76 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); 77 78 if (!nvdimm_bus) 79 return false; 80 WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev)); 81 if (device_for_each_child(&nvdimm_bus->dev, uuid, 82 is_namespace_uuid_busy) != 0) 83 return false; 84 return true; 85 } 86 87 bool pmem_should_map_pages(struct device *dev) 88 { 89 struct nd_region *nd_region = to_nd_region(dev->parent); 90 struct nd_namespace_common *ndns = to_ndns(dev); 91 struct nd_namespace_io *nsio; 92 93 if (!IS_ENABLED(CONFIG_ZONE_DEVICE)) 94 return false; 95 96 if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags)) 97 return false; 98 99 if (is_nd_pfn(dev) || is_nd_btt(dev)) 100 return false; 101 102 if (ndns->force_raw) 103 return false; 104 105 nsio = to_nd_namespace_io(dev); 106 if (region_intersects(nsio->res.start, resource_size(&nsio->res), 107 IORESOURCE_SYSTEM_RAM, 108 IORES_DESC_NONE) == REGION_MIXED) 109 return false; 110 111 return ARCH_MEMREMAP_PMEM == MEMREMAP_WB; 112 } 113 EXPORT_SYMBOL(pmem_should_map_pages); 114 115 unsigned int pmem_sector_size(struct nd_namespace_common *ndns) 116 { 117 if (is_namespace_pmem(&ndns->dev)) { 118 struct nd_namespace_pmem *nspm; 119 120 nspm = to_nd_namespace_pmem(&ndns->dev); 121 if (nspm->lbasize == 0 || nspm->lbasize == 512) 122 /* default */; 123 else if (nspm->lbasize == 4096) 124 return 4096; 125 else 126 dev_WARN(&ndns->dev, "unsupported sector size: %ld\n", 127 nspm->lbasize); 128 } 129 130 /* 131 * There is no namespace label (is_namespace_io()), or the label 132 * indicates the default sector size. 133 */ 134 return 512; 135 } 136 EXPORT_SYMBOL(pmem_sector_size); 137 138 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, 139 char *name) 140 { 141 struct nd_region *nd_region = to_nd_region(ndns->dev.parent); 142 const char *suffix = NULL; 143 144 if (ndns->claim && is_nd_btt(ndns->claim)) 145 suffix = "s"; 146 147 if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) { 148 int nsidx = 0; 149 150 if (is_namespace_pmem(&ndns->dev)) { 151 struct nd_namespace_pmem *nspm; 152 153 nspm = to_nd_namespace_pmem(&ndns->dev); 154 nsidx = nspm->id; 155 } 156 157 if (nsidx) 158 sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx, 159 suffix ? suffix : ""); 160 else 161 sprintf(name, "pmem%d%s", nd_region->id, 162 suffix ? suffix : ""); 163 } else { 164 return NULL; 165 } 166 167 return name; 168 } 169 EXPORT_SYMBOL(nvdimm_namespace_disk_name); 170 171 const uuid_t *nd_dev_to_uuid(struct device *dev) 172 { 173 if (dev && is_namespace_pmem(dev)) { 174 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 175 176 return nspm->uuid; 177 } 178 return &uuid_null; 179 } 180 EXPORT_SYMBOL(nd_dev_to_uuid); 181 182 static ssize_t nstype_show(struct device *dev, 183 struct device_attribute *attr, char *buf) 184 { 185 struct nd_region *nd_region = to_nd_region(dev->parent); 186 187 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region)); 188 } 189 static DEVICE_ATTR_RO(nstype); 190 191 static ssize_t __alt_name_store(struct device *dev, const char *buf, 192 const size_t len) 193 { 194 char *input, *pos, *alt_name, **ns_altname; 195 ssize_t rc; 196 197 if (is_namespace_pmem(dev)) { 198 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 199 200 ns_altname = &nspm->alt_name; 201 } else 202 return -ENXIO; 203 204 if (dev->driver || to_ndns(dev)->claim) 205 return -EBUSY; 206 207 input = kstrndup(buf, len, GFP_KERNEL); 208 if (!input) 209 return -ENOMEM; 210 211 pos = strim(input); 212 if (strlen(pos) + 1 > NSLABEL_NAME_LEN) { 213 rc = -EINVAL; 214 goto out; 215 } 216 217 alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL); 218 if (!alt_name) { 219 rc = -ENOMEM; 220 goto out; 221 } 222 kfree(*ns_altname); 223 *ns_altname = alt_name; 224 sprintf(*ns_altname, "%s", pos); 225 rc = len; 226 227 out: 228 kfree(input); 229 return rc; 230 } 231 232 static int nd_namespace_label_update(struct nd_region *nd_region, 233 struct device *dev) 234 { 235 dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim, 236 "namespace must be idle during label update\n"); 237 if (dev->driver || to_ndns(dev)->claim) 238 return 0; 239 240 /* 241 * Only allow label writes that will result in a valid namespace 242 * or deletion of an existing namespace. 243 */ 244 if (is_namespace_pmem(dev)) { 245 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 246 resource_size_t size = resource_size(&nspm->nsio.res); 247 248 if (size == 0 && nspm->uuid) 249 /* delete allocation */; 250 else if (!nspm->uuid) 251 return 0; 252 253 return nd_pmem_namespace_label_update(nd_region, nspm, size); 254 } else 255 return -ENXIO; 256 } 257 258 static ssize_t alt_name_store(struct device *dev, 259 struct device_attribute *attr, const char *buf, size_t len) 260 { 261 struct nd_region *nd_region = to_nd_region(dev->parent); 262 ssize_t rc; 263 264 device_lock(dev); 265 nvdimm_bus_lock(dev); 266 wait_nvdimm_bus_probe_idle(dev); 267 rc = __alt_name_store(dev, buf, len); 268 if (rc >= 0) 269 rc = nd_namespace_label_update(nd_region, dev); 270 dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); 271 nvdimm_bus_unlock(dev); 272 device_unlock(dev); 273 274 return rc < 0 ? rc : len; 275 } 276 277 static ssize_t alt_name_show(struct device *dev, 278 struct device_attribute *attr, char *buf) 279 { 280 char *ns_altname; 281 282 if (is_namespace_pmem(dev)) { 283 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 284 285 ns_altname = nspm->alt_name; 286 } else 287 return -ENXIO; 288 289 return sprintf(buf, "%s\n", ns_altname ? ns_altname : ""); 290 } 291 static DEVICE_ATTR_RW(alt_name); 292 293 static int scan_free(struct nd_region *nd_region, 294 struct nd_mapping *nd_mapping, struct nd_label_id *label_id, 295 resource_size_t n) 296 { 297 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 298 int rc = 0; 299 300 while (n) { 301 struct resource *res, *last; 302 303 last = NULL; 304 for_each_dpa_resource(ndd, res) 305 if (strcmp(res->name, label_id->id) == 0) 306 last = res; 307 res = last; 308 if (!res) 309 return 0; 310 311 if (n >= resource_size(res)) { 312 n -= resource_size(res); 313 nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc); 314 nvdimm_free_dpa(ndd, res); 315 /* retry with last resource deleted */ 316 continue; 317 } 318 319 rc = adjust_resource(res, res->start, resource_size(res) - n); 320 if (rc == 0) 321 res->flags |= DPA_RESOURCE_ADJUSTED; 322 nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc); 323 break; 324 } 325 326 return rc; 327 } 328 329 /** 330 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id 331 * @nd_region: the set of dimms to reclaim @n bytes from 332 * @label_id: unique identifier for the namespace consuming this dpa range 333 * @n: number of bytes per-dimm to release 334 * 335 * Assumes resources are ordered. Starting from the end try to 336 * adjust_resource() the allocation to @n, but if @n is larger than the 337 * allocation delete it and find the 'new' last allocation in the label 338 * set. 339 */ 340 static int shrink_dpa_allocation(struct nd_region *nd_region, 341 struct nd_label_id *label_id, resource_size_t n) 342 { 343 int i; 344 345 for (i = 0; i < nd_region->ndr_mappings; i++) { 346 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 347 int rc; 348 349 rc = scan_free(nd_region, nd_mapping, label_id, n); 350 if (rc) 351 return rc; 352 } 353 354 return 0; 355 } 356 357 static resource_size_t init_dpa_allocation(struct nd_label_id *label_id, 358 struct nd_region *nd_region, struct nd_mapping *nd_mapping, 359 resource_size_t n) 360 { 361 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 362 struct resource *res; 363 int rc = 0; 364 365 /* first resource allocation for this label-id or dimm */ 366 res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n); 367 if (!res) 368 rc = -EBUSY; 369 370 nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc); 371 return rc ? n : 0; 372 } 373 374 375 /** 376 * space_valid() - validate free dpa space against constraints 377 * @nd_region: hosting region of the free space 378 * @ndd: dimm device data for debug 379 * @label_id: namespace id to allocate space 380 * @prev: potential allocation that precedes free space 381 * @next: allocation that follows the given free space range 382 * @exist: first allocation with same id in the mapping 383 * @n: range that must satisfied for pmem allocations 384 * @valid: free space range to validate 385 * 386 * BLK-space is valid as long as it does not precede a PMEM 387 * allocation in a given region. PMEM-space must be contiguous 388 * and adjacent to an existing allocation (if one 389 * exists). If reserving PMEM any space is valid. 390 */ 391 static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, 392 struct nd_label_id *label_id, struct resource *prev, 393 struct resource *next, struct resource *exist, 394 resource_size_t n, struct resource *valid) 395 { 396 bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0; 397 unsigned long align; 398 399 align = nd_region->align / nd_region->ndr_mappings; 400 valid->start = ALIGN(valid->start, align); 401 valid->end = ALIGN_DOWN(valid->end + 1, align) - 1; 402 403 if (valid->start >= valid->end) 404 goto invalid; 405 406 if (is_reserve) 407 return; 408 409 /* allocation needs to be contiguous, so this is all or nothing */ 410 if (resource_size(valid) < n) 411 goto invalid; 412 413 /* we've got all the space we need and no existing allocation */ 414 if (!exist) 415 return; 416 417 /* allocation needs to be contiguous with the existing namespace */ 418 if (valid->start == exist->end + 1 419 || valid->end == exist->start - 1) 420 return; 421 422 invalid: 423 /* truncate @valid size to 0 */ 424 valid->end = valid->start - 1; 425 } 426 427 enum alloc_loc { 428 ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER, 429 }; 430 431 static resource_size_t scan_allocate(struct nd_region *nd_region, 432 struct nd_mapping *nd_mapping, struct nd_label_id *label_id, 433 resource_size_t n) 434 { 435 resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; 436 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 437 struct resource *res, *exist = NULL, valid; 438 const resource_size_t to_allocate = n; 439 int first; 440 441 for_each_dpa_resource(ndd, res) 442 if (strcmp(label_id->id, res->name) == 0) 443 exist = res; 444 445 valid.start = nd_mapping->start; 446 valid.end = mapping_end; 447 valid.name = "free space"; 448 retry: 449 first = 0; 450 for_each_dpa_resource(ndd, res) { 451 struct resource *next = res->sibling, *new_res = NULL; 452 resource_size_t allocate, available = 0; 453 enum alloc_loc loc = ALLOC_ERR; 454 const char *action; 455 int rc = 0; 456 457 /* ignore resources outside this nd_mapping */ 458 if (res->start > mapping_end) 459 continue; 460 if (res->end < nd_mapping->start) 461 continue; 462 463 /* space at the beginning of the mapping */ 464 if (!first++ && res->start > nd_mapping->start) { 465 valid.start = nd_mapping->start; 466 valid.end = res->start - 1; 467 space_valid(nd_region, ndd, label_id, NULL, next, exist, 468 to_allocate, &valid); 469 available = resource_size(&valid); 470 if (available) 471 loc = ALLOC_BEFORE; 472 } 473 474 /* space between allocations */ 475 if (!loc && next) { 476 valid.start = res->start + resource_size(res); 477 valid.end = min(mapping_end, next->start - 1); 478 space_valid(nd_region, ndd, label_id, res, next, exist, 479 to_allocate, &valid); 480 available = resource_size(&valid); 481 if (available) 482 loc = ALLOC_MID; 483 } 484 485 /* space at the end of the mapping */ 486 if (!loc && !next) { 487 valid.start = res->start + resource_size(res); 488 valid.end = mapping_end; 489 space_valid(nd_region, ndd, label_id, res, next, exist, 490 to_allocate, &valid); 491 available = resource_size(&valid); 492 if (available) 493 loc = ALLOC_AFTER; 494 } 495 496 if (!loc || !available) 497 continue; 498 allocate = min(available, n); 499 switch (loc) { 500 case ALLOC_BEFORE: 501 if (strcmp(res->name, label_id->id) == 0) { 502 /* adjust current resource up */ 503 rc = adjust_resource(res, res->start - allocate, 504 resource_size(res) + allocate); 505 action = "cur grow up"; 506 } else 507 action = "allocate"; 508 break; 509 case ALLOC_MID: 510 if (strcmp(next->name, label_id->id) == 0) { 511 /* adjust next resource up */ 512 rc = adjust_resource(next, next->start 513 - allocate, resource_size(next) 514 + allocate); 515 new_res = next; 516 action = "next grow up"; 517 } else if (strcmp(res->name, label_id->id) == 0) { 518 action = "grow down"; 519 } else 520 action = "allocate"; 521 break; 522 case ALLOC_AFTER: 523 if (strcmp(res->name, label_id->id) == 0) 524 action = "grow down"; 525 else 526 action = "allocate"; 527 break; 528 default: 529 return n; 530 } 531 532 if (strcmp(action, "allocate") == 0) { 533 new_res = nvdimm_allocate_dpa(ndd, label_id, 534 valid.start, allocate); 535 if (!new_res) 536 rc = -EBUSY; 537 } else if (strcmp(action, "grow down") == 0) { 538 /* adjust current resource down */ 539 rc = adjust_resource(res, res->start, resource_size(res) 540 + allocate); 541 if (rc == 0) 542 res->flags |= DPA_RESOURCE_ADJUSTED; 543 } 544 545 if (!new_res) 546 new_res = res; 547 548 nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n", 549 action, loc, rc); 550 551 if (rc) 552 return n; 553 554 n -= allocate; 555 if (n) { 556 /* 557 * Retry scan with newly inserted resources. 558 * For example, if we did an ALLOC_BEFORE 559 * insertion there may also have been space 560 * available for an ALLOC_AFTER insertion, so we 561 * need to check this same resource again 562 */ 563 goto retry; 564 } else 565 return 0; 566 } 567 568 if (n == to_allocate) 569 return init_dpa_allocation(label_id, nd_region, nd_mapping, n); 570 return n; 571 } 572 573 static int merge_dpa(struct nd_region *nd_region, 574 struct nd_mapping *nd_mapping, struct nd_label_id *label_id) 575 { 576 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 577 struct resource *res; 578 579 if (strncmp("pmem", label_id->id, 4) == 0) 580 return 0; 581 retry: 582 for_each_dpa_resource(ndd, res) { 583 int rc; 584 struct resource *next = res->sibling; 585 resource_size_t end = res->start + resource_size(res); 586 587 if (!next || strcmp(res->name, label_id->id) != 0 588 || strcmp(next->name, label_id->id) != 0 589 || end != next->start) 590 continue; 591 end += resource_size(next); 592 nvdimm_free_dpa(ndd, next); 593 rc = adjust_resource(res, res->start, end - res->start); 594 nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc); 595 if (rc) 596 return rc; 597 res->flags |= DPA_RESOURCE_ADJUSTED; 598 goto retry; 599 } 600 601 return 0; 602 } 603 604 int __reserve_free_pmem(struct device *dev, void *data) 605 { 606 struct nvdimm *nvdimm = data; 607 struct nd_region *nd_region; 608 struct nd_label_id label_id; 609 int i; 610 611 if (!is_memory(dev)) 612 return 0; 613 614 nd_region = to_nd_region(dev); 615 if (nd_region->ndr_mappings == 0) 616 return 0; 617 618 memset(&label_id, 0, sizeof(label_id)); 619 strcat(label_id.id, "pmem-reserve"); 620 for (i = 0; i < nd_region->ndr_mappings; i++) { 621 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 622 resource_size_t n, rem = 0; 623 624 if (nd_mapping->nvdimm != nvdimm) 625 continue; 626 627 n = nd_pmem_available_dpa(nd_region, nd_mapping); 628 if (n == 0) 629 return 0; 630 rem = scan_allocate(nd_region, nd_mapping, &label_id, n); 631 dev_WARN_ONCE(&nd_region->dev, rem, 632 "pmem reserve underrun: %#llx of %#llx bytes\n", 633 (unsigned long long) n - rem, 634 (unsigned long long) n); 635 return rem ? -ENXIO : 0; 636 } 637 638 return 0; 639 } 640 641 void release_free_pmem(struct nvdimm_bus *nvdimm_bus, 642 struct nd_mapping *nd_mapping) 643 { 644 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 645 struct resource *res, *_res; 646 647 for_each_dpa_resource_safe(ndd, res, _res) 648 if (strcmp(res->name, "pmem-reserve") == 0) 649 nvdimm_free_dpa(ndd, res); 650 } 651 652 /** 653 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id 654 * @nd_region: the set of dimms to allocate @n more bytes from 655 * @label_id: unique identifier for the namespace consuming this dpa range 656 * @n: number of bytes per-dimm to add to the existing allocation 657 * 658 * Assumes resources are ordered. For BLK regions, first consume 659 * BLK-only available DPA free space, then consume PMEM-aliased DPA 660 * space starting at the highest DPA. For PMEM regions start 661 * allocations from the start of an interleave set and end at the first 662 * BLK allocation or the end of the interleave set, whichever comes 663 * first. 664 */ 665 static int grow_dpa_allocation(struct nd_region *nd_region, 666 struct nd_label_id *label_id, resource_size_t n) 667 { 668 int i; 669 670 for (i = 0; i < nd_region->ndr_mappings; i++) { 671 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 672 resource_size_t rem = n; 673 int rc; 674 675 rem = scan_allocate(nd_region, nd_mapping, label_id, rem); 676 dev_WARN_ONCE(&nd_region->dev, rem, 677 "allocation underrun: %#llx of %#llx bytes\n", 678 (unsigned long long) n - rem, 679 (unsigned long long) n); 680 if (rem) 681 return -ENXIO; 682 683 rc = merge_dpa(nd_region, nd_mapping, label_id); 684 if (rc) 685 return rc; 686 } 687 688 return 0; 689 } 690 691 static void nd_namespace_pmem_set_resource(struct nd_region *nd_region, 692 struct nd_namespace_pmem *nspm, resource_size_t size) 693 { 694 struct resource *res = &nspm->nsio.res; 695 resource_size_t offset = 0; 696 697 if (size && !nspm->uuid) { 698 WARN_ON_ONCE(1); 699 size = 0; 700 } 701 702 if (size && nspm->uuid) { 703 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 704 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 705 struct nd_label_id label_id; 706 struct resource *res; 707 708 if (!ndd) { 709 size = 0; 710 goto out; 711 } 712 713 nd_label_gen_id(&label_id, nspm->uuid, 0); 714 715 /* calculate a spa offset from the dpa allocation offset */ 716 for_each_dpa_resource(ndd, res) 717 if (strcmp(res->name, label_id.id) == 0) { 718 offset = (res->start - nd_mapping->start) 719 * nd_region->ndr_mappings; 720 goto out; 721 } 722 723 WARN_ON_ONCE(1); 724 size = 0; 725 } 726 727 out: 728 res->start = nd_region->ndr_start + offset; 729 res->end = res->start + size - 1; 730 } 731 732 static bool uuid_not_set(const uuid_t *uuid, struct device *dev, 733 const char *where) 734 { 735 if (!uuid) { 736 dev_dbg(dev, "%s: uuid not set\n", where); 737 return true; 738 } 739 return false; 740 } 741 742 static ssize_t __size_store(struct device *dev, unsigned long long val) 743 { 744 resource_size_t allocated = 0, available = 0; 745 struct nd_region *nd_region = to_nd_region(dev->parent); 746 struct nd_namespace_common *ndns = to_ndns(dev); 747 struct nd_mapping *nd_mapping; 748 struct nvdimm_drvdata *ndd; 749 struct nd_label_id label_id; 750 u32 flags = 0, remainder; 751 int rc, i, id = -1; 752 uuid_t *uuid = NULL; 753 754 if (dev->driver || ndns->claim) 755 return -EBUSY; 756 757 if (is_namespace_pmem(dev)) { 758 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 759 760 uuid = nspm->uuid; 761 id = nspm->id; 762 } 763 764 /* 765 * We need a uuid for the allocation-label and dimm(s) on which 766 * to store the label. 767 */ 768 if (uuid_not_set(uuid, dev, __func__)) 769 return -ENXIO; 770 if (nd_region->ndr_mappings == 0) { 771 dev_dbg(dev, "not associated with dimm(s)\n"); 772 return -ENXIO; 773 } 774 775 div_u64_rem(val, nd_region->align, &remainder); 776 if (remainder) { 777 dev_dbg(dev, "%llu is not %ldK aligned\n", val, 778 nd_region->align / SZ_1K); 779 return -EINVAL; 780 } 781 782 nd_label_gen_id(&label_id, uuid, flags); 783 for (i = 0; i < nd_region->ndr_mappings; i++) { 784 nd_mapping = &nd_region->mapping[i]; 785 ndd = to_ndd(nd_mapping); 786 787 /* 788 * All dimms in an interleave set, need to be enabled 789 * for the size to be changed. 790 */ 791 if (!ndd) 792 return -ENXIO; 793 794 allocated += nvdimm_allocated_dpa(ndd, &label_id); 795 } 796 available = nd_region_allocatable_dpa(nd_region); 797 798 if (val > available + allocated) 799 return -ENOSPC; 800 801 if (val == allocated) 802 return 0; 803 804 val = div_u64(val, nd_region->ndr_mappings); 805 allocated = div_u64(allocated, nd_region->ndr_mappings); 806 if (val < allocated) 807 rc = shrink_dpa_allocation(nd_region, &label_id, 808 allocated - val); 809 else 810 rc = grow_dpa_allocation(nd_region, &label_id, val - allocated); 811 812 if (rc) 813 return rc; 814 815 if (is_namespace_pmem(dev)) { 816 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 817 818 nd_namespace_pmem_set_resource(nd_region, nspm, 819 val * nd_region->ndr_mappings); 820 } 821 822 /* 823 * Try to delete the namespace if we deleted all of its 824 * allocation, this is not the seed or 0th device for the 825 * region, and it is not actively claimed by a btt, pfn, or dax 826 * instance. 827 */ 828 if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim) 829 nd_device_unregister(dev, ND_ASYNC); 830 831 return rc; 832 } 833 834 static ssize_t size_store(struct device *dev, 835 struct device_attribute *attr, const char *buf, size_t len) 836 { 837 struct nd_region *nd_region = to_nd_region(dev->parent); 838 unsigned long long val; 839 int rc; 840 841 rc = kstrtoull(buf, 0, &val); 842 if (rc) 843 return rc; 844 845 device_lock(dev); 846 nvdimm_bus_lock(dev); 847 wait_nvdimm_bus_probe_idle(dev); 848 rc = __size_store(dev, val); 849 if (rc >= 0) 850 rc = nd_namespace_label_update(nd_region, dev); 851 852 /* setting size zero == 'delete namespace' */ 853 if (rc == 0 && val == 0 && is_namespace_pmem(dev)) { 854 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 855 856 kfree(nspm->uuid); 857 nspm->uuid = NULL; 858 } 859 860 dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); 861 862 nvdimm_bus_unlock(dev); 863 device_unlock(dev); 864 865 return rc < 0 ? rc : len; 866 } 867 868 resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns) 869 { 870 struct device *dev = &ndns->dev; 871 872 if (is_namespace_pmem(dev)) { 873 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 874 875 return resource_size(&nspm->nsio.res); 876 } else if (is_namespace_io(dev)) { 877 struct nd_namespace_io *nsio = to_nd_namespace_io(dev); 878 879 return resource_size(&nsio->res); 880 } else 881 WARN_ONCE(1, "unknown namespace type\n"); 882 return 0; 883 } 884 885 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns) 886 { 887 resource_size_t size; 888 889 nvdimm_bus_lock(&ndns->dev); 890 size = __nvdimm_namespace_capacity(ndns); 891 nvdimm_bus_unlock(&ndns->dev); 892 893 return size; 894 } 895 EXPORT_SYMBOL(nvdimm_namespace_capacity); 896 897 bool nvdimm_namespace_locked(struct nd_namespace_common *ndns) 898 { 899 int i; 900 bool locked = false; 901 struct device *dev = &ndns->dev; 902 struct nd_region *nd_region = to_nd_region(dev->parent); 903 904 for (i = 0; i < nd_region->ndr_mappings; i++) { 905 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 906 struct nvdimm *nvdimm = nd_mapping->nvdimm; 907 908 if (test_bit(NDD_LOCKED, &nvdimm->flags)) { 909 dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm)); 910 locked = true; 911 } 912 } 913 return locked; 914 } 915 EXPORT_SYMBOL(nvdimm_namespace_locked); 916 917 static ssize_t size_show(struct device *dev, 918 struct device_attribute *attr, char *buf) 919 { 920 return sprintf(buf, "%llu\n", (unsigned long long) 921 nvdimm_namespace_capacity(to_ndns(dev))); 922 } 923 static DEVICE_ATTR(size, 0444, size_show, size_store); 924 925 static uuid_t *namespace_to_uuid(struct device *dev) 926 { 927 if (is_namespace_pmem(dev)) { 928 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 929 930 return nspm->uuid; 931 } 932 return ERR_PTR(-ENXIO); 933 } 934 935 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 936 char *buf) 937 { 938 uuid_t *uuid = namespace_to_uuid(dev); 939 940 if (IS_ERR(uuid)) 941 return PTR_ERR(uuid); 942 if (uuid) 943 return sprintf(buf, "%pUb\n", uuid); 944 return sprintf(buf, "\n"); 945 } 946 947 /** 948 * namespace_update_uuid - check for a unique uuid and whether we're "renaming" 949 * @nd_region: parent region so we can updates all dimms in the set 950 * @dev: namespace type for generating label_id 951 * @new_uuid: incoming uuid 952 * @old_uuid: reference to the uuid storage location in the namespace object 953 */ 954 static int namespace_update_uuid(struct nd_region *nd_region, 955 struct device *dev, uuid_t *new_uuid, 956 uuid_t **old_uuid) 957 { 958 struct nd_label_id old_label_id; 959 struct nd_label_id new_label_id; 960 int i; 961 962 if (!nd_is_uuid_unique(dev, new_uuid)) 963 return -EINVAL; 964 965 if (*old_uuid == NULL) 966 goto out; 967 968 /* 969 * If we've already written a label with this uuid, then it's 970 * too late to rename because we can't reliably update the uuid 971 * without losing the old namespace. Userspace must delete this 972 * namespace to abandon the old uuid. 973 */ 974 for (i = 0; i < nd_region->ndr_mappings; i++) { 975 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 976 977 /* 978 * This check by itself is sufficient because old_uuid 979 * would be NULL above if this uuid did not exist in the 980 * currently written set. 981 * 982 * FIXME: can we delete uuid with zero dpa allocated? 983 */ 984 if (list_empty(&nd_mapping->labels)) 985 return -EBUSY; 986 } 987 988 nd_label_gen_id(&old_label_id, *old_uuid, 0); 989 nd_label_gen_id(&new_label_id, new_uuid, 0); 990 for (i = 0; i < nd_region->ndr_mappings; i++) { 991 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 992 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 993 struct nd_label_ent *label_ent; 994 struct resource *res; 995 996 for_each_dpa_resource(ndd, res) 997 if (strcmp(res->name, old_label_id.id) == 0) 998 sprintf((void *) res->name, "%s", 999 new_label_id.id); 1000 1001 mutex_lock(&nd_mapping->lock); 1002 list_for_each_entry(label_ent, &nd_mapping->labels, list) { 1003 struct nd_namespace_label *nd_label = label_ent->label; 1004 struct nd_label_id label_id; 1005 uuid_t uuid; 1006 1007 if (!nd_label) 1008 continue; 1009 nsl_get_uuid(ndd, nd_label, &uuid); 1010 nd_label_gen_id(&label_id, &uuid, 1011 nsl_get_flags(ndd, nd_label)); 1012 if (strcmp(old_label_id.id, label_id.id) == 0) 1013 set_bit(ND_LABEL_REAP, &label_ent->flags); 1014 } 1015 mutex_unlock(&nd_mapping->lock); 1016 } 1017 kfree(*old_uuid); 1018 out: 1019 *old_uuid = new_uuid; 1020 return 0; 1021 } 1022 1023 static ssize_t uuid_store(struct device *dev, 1024 struct device_attribute *attr, const char *buf, size_t len) 1025 { 1026 struct nd_region *nd_region = to_nd_region(dev->parent); 1027 uuid_t *uuid = NULL; 1028 uuid_t **ns_uuid; 1029 ssize_t rc = 0; 1030 1031 if (is_namespace_pmem(dev)) { 1032 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1033 1034 ns_uuid = &nspm->uuid; 1035 } else 1036 return -ENXIO; 1037 1038 device_lock(dev); 1039 nvdimm_bus_lock(dev); 1040 wait_nvdimm_bus_probe_idle(dev); 1041 if (to_ndns(dev)->claim) 1042 rc = -EBUSY; 1043 if (rc >= 0) 1044 rc = nd_uuid_store(dev, &uuid, buf, len); 1045 if (rc >= 0) 1046 rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid); 1047 if (rc >= 0) 1048 rc = nd_namespace_label_update(nd_region, dev); 1049 else 1050 kfree(uuid); 1051 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 1052 buf[len - 1] == '\n' ? "" : "\n"); 1053 nvdimm_bus_unlock(dev); 1054 device_unlock(dev); 1055 1056 return rc < 0 ? rc : len; 1057 } 1058 static DEVICE_ATTR_RW(uuid); 1059 1060 static ssize_t resource_show(struct device *dev, 1061 struct device_attribute *attr, char *buf) 1062 { 1063 struct resource *res; 1064 1065 if (is_namespace_pmem(dev)) { 1066 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1067 1068 res = &nspm->nsio.res; 1069 } else if (is_namespace_io(dev)) { 1070 struct nd_namespace_io *nsio = to_nd_namespace_io(dev); 1071 1072 res = &nsio->res; 1073 } else 1074 return -ENXIO; 1075 1076 /* no address to convey if the namespace has no allocation */ 1077 if (resource_size(res) == 0) 1078 return -ENXIO; 1079 return sprintf(buf, "%#llx\n", (unsigned long long) res->start); 1080 } 1081 static DEVICE_ATTR_ADMIN_RO(resource); 1082 1083 static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 }; 1084 1085 static ssize_t sector_size_show(struct device *dev, 1086 struct device_attribute *attr, char *buf) 1087 { 1088 if (is_namespace_pmem(dev)) { 1089 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1090 1091 return nd_size_select_show(nspm->lbasize, 1092 pmem_lbasize_supported, buf); 1093 } 1094 return -ENXIO; 1095 } 1096 1097 static ssize_t sector_size_store(struct device *dev, 1098 struct device_attribute *attr, const char *buf, size_t len) 1099 { 1100 struct nd_region *nd_region = to_nd_region(dev->parent); 1101 const unsigned long *supported; 1102 unsigned long *lbasize; 1103 ssize_t rc = 0; 1104 1105 if (is_namespace_pmem(dev)) { 1106 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1107 1108 lbasize = &nspm->lbasize; 1109 supported = pmem_lbasize_supported; 1110 } else 1111 return -ENXIO; 1112 1113 device_lock(dev); 1114 nvdimm_bus_lock(dev); 1115 if (to_ndns(dev)->claim) 1116 rc = -EBUSY; 1117 if (rc >= 0) 1118 rc = nd_size_select_store(dev, buf, lbasize, supported); 1119 if (rc >= 0) 1120 rc = nd_namespace_label_update(nd_region, dev); 1121 dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", 1122 buf, buf[len - 1] == '\n' ? "" : "\n"); 1123 nvdimm_bus_unlock(dev); 1124 device_unlock(dev); 1125 1126 return rc ? rc : len; 1127 } 1128 static DEVICE_ATTR_RW(sector_size); 1129 1130 static ssize_t dpa_extents_show(struct device *dev, 1131 struct device_attribute *attr, char *buf) 1132 { 1133 struct nd_region *nd_region = to_nd_region(dev->parent); 1134 struct nd_label_id label_id; 1135 uuid_t *uuid = NULL; 1136 int count = 0, i; 1137 u32 flags = 0; 1138 1139 nvdimm_bus_lock(dev); 1140 if (is_namespace_pmem(dev)) { 1141 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1142 1143 uuid = nspm->uuid; 1144 flags = 0; 1145 } 1146 1147 if (!uuid) 1148 goto out; 1149 1150 nd_label_gen_id(&label_id, uuid, flags); 1151 for (i = 0; i < nd_region->ndr_mappings; i++) { 1152 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 1153 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1154 struct resource *res; 1155 1156 for_each_dpa_resource(ndd, res) 1157 if (strcmp(res->name, label_id.id) == 0) 1158 count++; 1159 } 1160 out: 1161 nvdimm_bus_unlock(dev); 1162 1163 return sprintf(buf, "%d\n", count); 1164 } 1165 static DEVICE_ATTR_RO(dpa_extents); 1166 1167 static int btt_claim_class(struct device *dev) 1168 { 1169 struct nd_region *nd_region = to_nd_region(dev->parent); 1170 int i, loop_bitmask = 0; 1171 1172 for (i = 0; i < nd_region->ndr_mappings; i++) { 1173 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 1174 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1175 struct nd_namespace_index *nsindex; 1176 1177 /* 1178 * If any of the DIMMs do not support labels the only 1179 * possible BTT format is v1. 1180 */ 1181 if (!ndd) { 1182 loop_bitmask = 0; 1183 break; 1184 } 1185 1186 nsindex = to_namespace_index(ndd, ndd->ns_current); 1187 if (nsindex == NULL) 1188 loop_bitmask |= 1; 1189 else { 1190 /* check whether existing labels are v1.1 or v1.2 */ 1191 if (__le16_to_cpu(nsindex->major) == 1 1192 && __le16_to_cpu(nsindex->minor) == 1) 1193 loop_bitmask |= 2; 1194 else 1195 loop_bitmask |= 4; 1196 } 1197 } 1198 /* 1199 * If nsindex is null loop_bitmask's bit 0 will be set, and if an index 1200 * block is found, a v1.1 label for any mapping will set bit 1, and a 1201 * v1.2 label will set bit 2. 1202 * 1203 * At the end of the loop, at most one of the three bits must be set. 1204 * If multiple bits were set, it means the different mappings disagree 1205 * about their labels, and this must be cleaned up first. 1206 * 1207 * If all the label index blocks are found to agree, nsindex of NULL 1208 * implies labels haven't been initialized yet, and when they will, 1209 * they will be of the 1.2 format, so we can assume BTT2.0 1210 * 1211 * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are 1212 * found, we enforce BTT2.0 1213 * 1214 * If the loop was never entered, default to BTT1.1 (legacy namespaces) 1215 */ 1216 switch (loop_bitmask) { 1217 case 0: 1218 case 2: 1219 return NVDIMM_CCLASS_BTT; 1220 case 1: 1221 case 4: 1222 return NVDIMM_CCLASS_BTT2; 1223 default: 1224 return -ENXIO; 1225 } 1226 } 1227 1228 static ssize_t holder_show(struct device *dev, 1229 struct device_attribute *attr, char *buf) 1230 { 1231 struct nd_namespace_common *ndns = to_ndns(dev); 1232 ssize_t rc; 1233 1234 device_lock(dev); 1235 rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : ""); 1236 device_unlock(dev); 1237 1238 return rc; 1239 } 1240 static DEVICE_ATTR_RO(holder); 1241 1242 static int __holder_class_store(struct device *dev, const char *buf) 1243 { 1244 struct nd_namespace_common *ndns = to_ndns(dev); 1245 1246 if (dev->driver || ndns->claim) 1247 return -EBUSY; 1248 1249 if (sysfs_streq(buf, "btt")) { 1250 int rc = btt_claim_class(dev); 1251 1252 if (rc < NVDIMM_CCLASS_NONE) 1253 return rc; 1254 ndns->claim_class = rc; 1255 } else if (sysfs_streq(buf, "pfn")) 1256 ndns->claim_class = NVDIMM_CCLASS_PFN; 1257 else if (sysfs_streq(buf, "dax")) 1258 ndns->claim_class = NVDIMM_CCLASS_DAX; 1259 else if (sysfs_streq(buf, "")) 1260 ndns->claim_class = NVDIMM_CCLASS_NONE; 1261 else 1262 return -EINVAL; 1263 1264 return 0; 1265 } 1266 1267 static ssize_t holder_class_store(struct device *dev, 1268 struct device_attribute *attr, const char *buf, size_t len) 1269 { 1270 struct nd_region *nd_region = to_nd_region(dev->parent); 1271 int rc; 1272 1273 device_lock(dev); 1274 nvdimm_bus_lock(dev); 1275 wait_nvdimm_bus_probe_idle(dev); 1276 rc = __holder_class_store(dev, buf); 1277 if (rc >= 0) 1278 rc = nd_namespace_label_update(nd_region, dev); 1279 dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc); 1280 nvdimm_bus_unlock(dev); 1281 device_unlock(dev); 1282 1283 return rc < 0 ? rc : len; 1284 } 1285 1286 static ssize_t holder_class_show(struct device *dev, 1287 struct device_attribute *attr, char *buf) 1288 { 1289 struct nd_namespace_common *ndns = to_ndns(dev); 1290 ssize_t rc; 1291 1292 device_lock(dev); 1293 if (ndns->claim_class == NVDIMM_CCLASS_NONE) 1294 rc = sprintf(buf, "\n"); 1295 else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) || 1296 (ndns->claim_class == NVDIMM_CCLASS_BTT2)) 1297 rc = sprintf(buf, "btt\n"); 1298 else if (ndns->claim_class == NVDIMM_CCLASS_PFN) 1299 rc = sprintf(buf, "pfn\n"); 1300 else if (ndns->claim_class == NVDIMM_CCLASS_DAX) 1301 rc = sprintf(buf, "dax\n"); 1302 else 1303 rc = sprintf(buf, "<unknown>\n"); 1304 device_unlock(dev); 1305 1306 return rc; 1307 } 1308 static DEVICE_ATTR_RW(holder_class); 1309 1310 static ssize_t mode_show(struct device *dev, 1311 struct device_attribute *attr, char *buf) 1312 { 1313 struct nd_namespace_common *ndns = to_ndns(dev); 1314 struct device *claim; 1315 char *mode; 1316 ssize_t rc; 1317 1318 device_lock(dev); 1319 claim = ndns->claim; 1320 if (claim && is_nd_btt(claim)) 1321 mode = "safe"; 1322 else if (claim && is_nd_pfn(claim)) 1323 mode = "memory"; 1324 else if (claim && is_nd_dax(claim)) 1325 mode = "dax"; 1326 else if (!claim && pmem_should_map_pages(dev)) 1327 mode = "memory"; 1328 else 1329 mode = "raw"; 1330 rc = sprintf(buf, "%s\n", mode); 1331 device_unlock(dev); 1332 1333 return rc; 1334 } 1335 static DEVICE_ATTR_RO(mode); 1336 1337 static ssize_t force_raw_store(struct device *dev, 1338 struct device_attribute *attr, const char *buf, size_t len) 1339 { 1340 bool force_raw; 1341 int rc = strtobool(buf, &force_raw); 1342 1343 if (rc) 1344 return rc; 1345 1346 to_ndns(dev)->force_raw = force_raw; 1347 return len; 1348 } 1349 1350 static ssize_t force_raw_show(struct device *dev, 1351 struct device_attribute *attr, char *buf) 1352 { 1353 return sprintf(buf, "%d\n", to_ndns(dev)->force_raw); 1354 } 1355 static DEVICE_ATTR_RW(force_raw); 1356 1357 static struct attribute *nd_namespace_attributes[] = { 1358 &dev_attr_nstype.attr, 1359 &dev_attr_size.attr, 1360 &dev_attr_mode.attr, 1361 &dev_attr_uuid.attr, 1362 &dev_attr_holder.attr, 1363 &dev_attr_resource.attr, 1364 &dev_attr_alt_name.attr, 1365 &dev_attr_force_raw.attr, 1366 &dev_attr_sector_size.attr, 1367 &dev_attr_dpa_extents.attr, 1368 &dev_attr_holder_class.attr, 1369 NULL, 1370 }; 1371 1372 static umode_t namespace_visible(struct kobject *kobj, 1373 struct attribute *a, int n) 1374 { 1375 struct device *dev = container_of(kobj, struct device, kobj); 1376 1377 if (is_namespace_pmem(dev)) { 1378 if (a == &dev_attr_size.attr) 1379 return 0644; 1380 1381 return a->mode; 1382 } 1383 1384 /* base is_namespace_io() attributes */ 1385 if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr || 1386 a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr || 1387 a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr || 1388 a == &dev_attr_resource.attr) 1389 return a->mode; 1390 1391 return 0; 1392 } 1393 1394 static struct attribute_group nd_namespace_attribute_group = { 1395 .attrs = nd_namespace_attributes, 1396 .is_visible = namespace_visible, 1397 }; 1398 1399 static const struct attribute_group *nd_namespace_attribute_groups[] = { 1400 &nd_device_attribute_group, 1401 &nd_namespace_attribute_group, 1402 &nd_numa_attribute_group, 1403 NULL, 1404 }; 1405 1406 static const struct device_type namespace_io_device_type = { 1407 .name = "nd_namespace_io", 1408 .release = namespace_io_release, 1409 .groups = nd_namespace_attribute_groups, 1410 }; 1411 1412 static const struct device_type namespace_pmem_device_type = { 1413 .name = "nd_namespace_pmem", 1414 .release = namespace_pmem_release, 1415 .groups = nd_namespace_attribute_groups, 1416 }; 1417 1418 static bool is_namespace_pmem(const struct device *dev) 1419 { 1420 return dev ? dev->type == &namespace_pmem_device_type : false; 1421 } 1422 1423 static bool is_namespace_io(const struct device *dev) 1424 { 1425 return dev ? dev->type == &namespace_io_device_type : false; 1426 } 1427 1428 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) 1429 { 1430 struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL; 1431 struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL; 1432 struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL; 1433 struct nd_namespace_common *ndns = NULL; 1434 resource_size_t size; 1435 1436 if (nd_btt || nd_pfn || nd_dax) { 1437 if (nd_btt) 1438 ndns = nd_btt->ndns; 1439 else if (nd_pfn) 1440 ndns = nd_pfn->ndns; 1441 else if (nd_dax) 1442 ndns = nd_dax->nd_pfn.ndns; 1443 1444 if (!ndns) 1445 return ERR_PTR(-ENODEV); 1446 1447 /* 1448 * Flush any in-progess probes / removals in the driver 1449 * for the raw personality of this namespace. 1450 */ 1451 device_lock(&ndns->dev); 1452 device_unlock(&ndns->dev); 1453 if (ndns->dev.driver) { 1454 dev_dbg(&ndns->dev, "is active, can't bind %s\n", 1455 dev_name(dev)); 1456 return ERR_PTR(-EBUSY); 1457 } 1458 if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev, 1459 "host (%s) vs claim (%s) mismatch\n", 1460 dev_name(dev), 1461 dev_name(ndns->claim))) 1462 return ERR_PTR(-ENXIO); 1463 } else { 1464 ndns = to_ndns(dev); 1465 if (ndns->claim) { 1466 dev_dbg(dev, "claimed by %s, failing probe\n", 1467 dev_name(ndns->claim)); 1468 1469 return ERR_PTR(-ENXIO); 1470 } 1471 } 1472 1473 if (nvdimm_namespace_locked(ndns)) 1474 return ERR_PTR(-EACCES); 1475 1476 size = nvdimm_namespace_capacity(ndns); 1477 if (size < ND_MIN_NAMESPACE_SIZE) { 1478 dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n", 1479 &size, ND_MIN_NAMESPACE_SIZE); 1480 return ERR_PTR(-ENODEV); 1481 } 1482 1483 /* 1484 * Note, alignment validation for fsdax and devdax mode 1485 * namespaces happens in nd_pfn_validate() where infoblock 1486 * padding parameters can be applied. 1487 */ 1488 if (pmem_should_map_pages(dev)) { 1489 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); 1490 struct resource *res = &nsio->res; 1491 1492 if (!IS_ALIGNED(res->start | (res->end + 1), 1493 memremap_compat_align())) { 1494 dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res); 1495 return ERR_PTR(-EOPNOTSUPP); 1496 } 1497 } 1498 1499 if (is_namespace_pmem(&ndns->dev)) { 1500 struct nd_namespace_pmem *nspm; 1501 1502 nspm = to_nd_namespace_pmem(&ndns->dev); 1503 if (uuid_not_set(nspm->uuid, &ndns->dev, __func__)) 1504 return ERR_PTR(-ENODEV); 1505 } 1506 1507 return ndns; 1508 } 1509 EXPORT_SYMBOL(nvdimm_namespace_common_probe); 1510 1511 int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns, 1512 resource_size_t size) 1513 { 1514 return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size); 1515 } 1516 EXPORT_SYMBOL_GPL(devm_namespace_enable); 1517 1518 void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns) 1519 { 1520 devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev)); 1521 } 1522 EXPORT_SYMBOL_GPL(devm_namespace_disable); 1523 1524 static struct device **create_namespace_io(struct nd_region *nd_region) 1525 { 1526 struct nd_namespace_io *nsio; 1527 struct device *dev, **devs; 1528 struct resource *res; 1529 1530 nsio = kzalloc(sizeof(*nsio), GFP_KERNEL); 1531 if (!nsio) 1532 return NULL; 1533 1534 devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL); 1535 if (!devs) { 1536 kfree(nsio); 1537 return NULL; 1538 } 1539 1540 dev = &nsio->common.dev; 1541 dev->type = &namespace_io_device_type; 1542 dev->parent = &nd_region->dev; 1543 res = &nsio->res; 1544 res->name = dev_name(&nd_region->dev); 1545 res->flags = IORESOURCE_MEM; 1546 res->start = nd_region->ndr_start; 1547 res->end = res->start + nd_region->ndr_size - 1; 1548 1549 devs[0] = dev; 1550 return devs; 1551 } 1552 1553 static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid, 1554 u64 cookie, u16 pos) 1555 { 1556 struct nd_namespace_label *found = NULL; 1557 int i; 1558 1559 for (i = 0; i < nd_region->ndr_mappings; i++) { 1560 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 1561 struct nd_interleave_set *nd_set = nd_region->nd_set; 1562 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1563 struct nd_label_ent *label_ent; 1564 bool found_uuid = false; 1565 1566 list_for_each_entry(label_ent, &nd_mapping->labels, list) { 1567 struct nd_namespace_label *nd_label = label_ent->label; 1568 u16 position; 1569 1570 if (!nd_label) 1571 continue; 1572 position = nsl_get_position(ndd, nd_label); 1573 1574 if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) 1575 continue; 1576 1577 if (!nsl_uuid_equal(ndd, nd_label, uuid)) 1578 continue; 1579 1580 if (!nsl_validate_type_guid(ndd, nd_label, 1581 &nd_set->type_guid)) 1582 continue; 1583 1584 if (found_uuid) { 1585 dev_dbg(ndd->dev, "duplicate entry for uuid\n"); 1586 return false; 1587 } 1588 found_uuid = true; 1589 if (!nsl_validate_nlabel(nd_region, ndd, nd_label)) 1590 continue; 1591 if (position != pos) 1592 continue; 1593 found = nd_label; 1594 break; 1595 } 1596 if (found) 1597 break; 1598 } 1599 return found != NULL; 1600 } 1601 1602 static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id) 1603 { 1604 int i; 1605 1606 if (!pmem_id) 1607 return -ENODEV; 1608 1609 for (i = 0; i < nd_region->ndr_mappings; i++) { 1610 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 1611 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1612 struct nd_namespace_label *nd_label = NULL; 1613 u64 hw_start, hw_end, pmem_start, pmem_end; 1614 struct nd_label_ent *label_ent; 1615 1616 lockdep_assert_held(&nd_mapping->lock); 1617 list_for_each_entry(label_ent, &nd_mapping->labels, list) { 1618 nd_label = label_ent->label; 1619 if (!nd_label) 1620 continue; 1621 if (nsl_uuid_equal(ndd, nd_label, pmem_id)) 1622 break; 1623 nd_label = NULL; 1624 } 1625 1626 if (!nd_label) { 1627 WARN_ON(1); 1628 return -EINVAL; 1629 } 1630 1631 /* 1632 * Check that this label is compliant with the dpa 1633 * range published in NFIT 1634 */ 1635 hw_start = nd_mapping->start; 1636 hw_end = hw_start + nd_mapping->size; 1637 pmem_start = nsl_get_dpa(ndd, nd_label); 1638 pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label); 1639 if (pmem_start >= hw_start && pmem_start < hw_end 1640 && pmem_end <= hw_end && pmem_end > hw_start) 1641 /* pass */; 1642 else { 1643 dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n", 1644 dev_name(ndd->dev), 1645 nsl_uuid_raw(ndd, nd_label)); 1646 return -EINVAL; 1647 } 1648 1649 /* move recently validated label to the front of the list */ 1650 list_move(&label_ent->list, &nd_mapping->labels); 1651 } 1652 return 0; 1653 } 1654 1655 /** 1656 * create_namespace_pmem - validate interleave set labelling, retrieve label0 1657 * @nd_region: region with mappings to validate 1658 * @nspm: target namespace to create 1659 * @nd_label: target pmem namespace label to evaluate 1660 */ 1661 static struct device *create_namespace_pmem(struct nd_region *nd_region, 1662 struct nd_mapping *nd_mapping, 1663 struct nd_namespace_label *nd_label) 1664 { 1665 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1666 struct nd_namespace_index *nsindex = 1667 to_namespace_index(ndd, ndd->ns_current); 1668 u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex); 1669 u64 altcookie = nd_region_interleave_set_altcookie(nd_region); 1670 struct nd_label_ent *label_ent; 1671 struct nd_namespace_pmem *nspm; 1672 resource_size_t size = 0; 1673 struct resource *res; 1674 struct device *dev; 1675 uuid_t uuid; 1676 int rc = 0; 1677 u16 i; 1678 1679 if (cookie == 0) { 1680 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n"); 1681 return ERR_PTR(-ENXIO); 1682 } 1683 1684 if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) { 1685 dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n", 1686 nsl_uuid_raw(ndd, nd_label)); 1687 if (!nsl_validate_isetcookie(ndd, nd_label, altcookie)) 1688 return ERR_PTR(-EAGAIN); 1689 1690 dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n", 1691 nsl_uuid_raw(ndd, nd_label)); 1692 } 1693 1694 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); 1695 if (!nspm) 1696 return ERR_PTR(-ENOMEM); 1697 1698 nspm->id = -1; 1699 dev = &nspm->nsio.common.dev; 1700 dev->type = &namespace_pmem_device_type; 1701 dev->parent = &nd_region->dev; 1702 res = &nspm->nsio.res; 1703 res->name = dev_name(&nd_region->dev); 1704 res->flags = IORESOURCE_MEM; 1705 1706 for (i = 0; i < nd_region->ndr_mappings; i++) { 1707 nsl_get_uuid(ndd, nd_label, &uuid); 1708 if (has_uuid_at_pos(nd_region, &uuid, cookie, i)) 1709 continue; 1710 if (has_uuid_at_pos(nd_region, &uuid, altcookie, i)) 1711 continue; 1712 break; 1713 } 1714 1715 if (i < nd_region->ndr_mappings) { 1716 struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm; 1717 1718 /* 1719 * Give up if we don't find an instance of a uuid at each 1720 * position (from 0 to nd_region->ndr_mappings - 1), or if we 1721 * find a dimm with two instances of the same uuid. 1722 */ 1723 dev_err(&nd_region->dev, "%s missing label for %pUb\n", 1724 nvdimm_name(nvdimm), nsl_uuid_raw(ndd, nd_label)); 1725 rc = -EINVAL; 1726 goto err; 1727 } 1728 1729 /* 1730 * Fix up each mapping's 'labels' to have the validated pmem label for 1731 * that position at labels[0], and NULL at labels[1]. In the process, 1732 * check that the namespace aligns with interleave-set. 1733 */ 1734 nsl_get_uuid(ndd, nd_label, &uuid); 1735 rc = select_pmem_id(nd_region, &uuid); 1736 if (rc) 1737 goto err; 1738 1739 /* Calculate total size and populate namespace properties from label0 */ 1740 for (i = 0; i < nd_region->ndr_mappings; i++) { 1741 struct nd_namespace_label *label0; 1742 struct nvdimm_drvdata *ndd; 1743 1744 nd_mapping = &nd_region->mapping[i]; 1745 label_ent = list_first_entry_or_null(&nd_mapping->labels, 1746 typeof(*label_ent), list); 1747 label0 = label_ent ? label_ent->label : NULL; 1748 1749 if (!label0) { 1750 WARN_ON(1); 1751 continue; 1752 } 1753 1754 ndd = to_ndd(nd_mapping); 1755 size += nsl_get_rawsize(ndd, label0); 1756 if (nsl_get_position(ndd, label0) != 0) 1757 continue; 1758 WARN_ON(nspm->alt_name || nspm->uuid); 1759 nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0), 1760 NSLABEL_NAME_LEN, GFP_KERNEL); 1761 nsl_get_uuid(ndd, label0, &uuid); 1762 nspm->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL); 1763 nspm->lbasize = nsl_get_lbasize(ndd, label0); 1764 nspm->nsio.common.claim_class = 1765 nsl_get_claim_class(ndd, label0); 1766 } 1767 1768 if (!nspm->alt_name || !nspm->uuid) { 1769 rc = -ENOMEM; 1770 goto err; 1771 } 1772 1773 nd_namespace_pmem_set_resource(nd_region, nspm, size); 1774 1775 return dev; 1776 err: 1777 namespace_pmem_release(dev); 1778 switch (rc) { 1779 case -EINVAL: 1780 dev_dbg(&nd_region->dev, "invalid label(s)\n"); 1781 break; 1782 case -ENODEV: 1783 dev_dbg(&nd_region->dev, "label not found\n"); 1784 break; 1785 default: 1786 dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc); 1787 break; 1788 } 1789 return ERR_PTR(rc); 1790 } 1791 1792 static struct device *nd_namespace_pmem_create(struct nd_region *nd_region) 1793 { 1794 struct nd_namespace_pmem *nspm; 1795 struct resource *res; 1796 struct device *dev; 1797 1798 if (!is_memory(&nd_region->dev)) 1799 return NULL; 1800 1801 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); 1802 if (!nspm) 1803 return NULL; 1804 1805 dev = &nspm->nsio.common.dev; 1806 dev->type = &namespace_pmem_device_type; 1807 dev->parent = &nd_region->dev; 1808 res = &nspm->nsio.res; 1809 res->name = dev_name(&nd_region->dev); 1810 res->flags = IORESOURCE_MEM; 1811 1812 nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL); 1813 if (nspm->id < 0) { 1814 kfree(nspm); 1815 return NULL; 1816 } 1817 dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id); 1818 nd_namespace_pmem_set_resource(nd_region, nspm, 0); 1819 1820 return dev; 1821 } 1822 1823 static struct lock_class_key nvdimm_namespace_key; 1824 1825 void nd_region_create_ns_seed(struct nd_region *nd_region) 1826 { 1827 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); 1828 1829 if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO) 1830 return; 1831 1832 nd_region->ns_seed = nd_namespace_pmem_create(nd_region); 1833 1834 /* 1835 * Seed creation failures are not fatal, provisioning is simply 1836 * disabled until memory becomes available 1837 */ 1838 if (!nd_region->ns_seed) 1839 dev_err(&nd_region->dev, "failed to create namespace\n"); 1840 else { 1841 device_initialize(nd_region->ns_seed); 1842 lockdep_set_class(&nd_region->ns_seed->mutex, 1843 &nvdimm_namespace_key); 1844 nd_device_register(nd_region->ns_seed); 1845 } 1846 } 1847 1848 void nd_region_create_dax_seed(struct nd_region *nd_region) 1849 { 1850 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); 1851 nd_region->dax_seed = nd_dax_create(nd_region); 1852 /* 1853 * Seed creation failures are not fatal, provisioning is simply 1854 * disabled until memory becomes available 1855 */ 1856 if (!nd_region->dax_seed) 1857 dev_err(&nd_region->dev, "failed to create dax namespace\n"); 1858 } 1859 1860 void nd_region_create_pfn_seed(struct nd_region *nd_region) 1861 { 1862 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); 1863 nd_region->pfn_seed = nd_pfn_create(nd_region); 1864 /* 1865 * Seed creation failures are not fatal, provisioning is simply 1866 * disabled until memory becomes available 1867 */ 1868 if (!nd_region->pfn_seed) 1869 dev_err(&nd_region->dev, "failed to create pfn namespace\n"); 1870 } 1871 1872 void nd_region_create_btt_seed(struct nd_region *nd_region) 1873 { 1874 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); 1875 nd_region->btt_seed = nd_btt_create(nd_region); 1876 /* 1877 * Seed creation failures are not fatal, provisioning is simply 1878 * disabled until memory becomes available 1879 */ 1880 if (!nd_region->btt_seed) 1881 dev_err(&nd_region->dev, "failed to create btt namespace\n"); 1882 } 1883 1884 static int add_namespace_resource(struct nd_region *nd_region, 1885 struct nd_namespace_label *nd_label, struct device **devs, 1886 int count) 1887 { 1888 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 1889 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1890 int i; 1891 1892 for (i = 0; i < count; i++) { 1893 uuid_t *uuid = namespace_to_uuid(devs[i]); 1894 1895 if (IS_ERR(uuid)) { 1896 WARN_ON(1); 1897 continue; 1898 } 1899 1900 if (!nsl_uuid_equal(ndd, nd_label, uuid)) 1901 continue; 1902 dev_err(&nd_region->dev, 1903 "error: conflicting extents for uuid: %pUb\n", uuid); 1904 return -ENXIO; 1905 } 1906 1907 return i; 1908 } 1909 1910 static int cmp_dpa(const void *a, const void *b) 1911 { 1912 const struct device *dev_a = *(const struct device **) a; 1913 const struct device *dev_b = *(const struct device **) b; 1914 struct nd_namespace_pmem *nspm_a, *nspm_b; 1915 1916 if (is_namespace_io(dev_a)) 1917 return 0; 1918 1919 nspm_a = to_nd_namespace_pmem(dev_a); 1920 nspm_b = to_nd_namespace_pmem(dev_b); 1921 1922 return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start, 1923 sizeof(resource_size_t)); 1924 } 1925 1926 static struct device **scan_labels(struct nd_region *nd_region) 1927 { 1928 int i, count = 0; 1929 struct device *dev, **devs = NULL; 1930 struct nd_label_ent *label_ent, *e; 1931 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 1932 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1933 resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1; 1934 1935 /* "safe" because create_namespace_pmem() might list_move() label_ent */ 1936 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { 1937 struct nd_namespace_label *nd_label = label_ent->label; 1938 struct device **__devs; 1939 1940 if (!nd_label) 1941 continue; 1942 1943 /* skip labels that describe extents outside of the region */ 1944 if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start || 1945 nsl_get_dpa(ndd, nd_label) > map_end) 1946 continue; 1947 1948 i = add_namespace_resource(nd_region, nd_label, devs, count); 1949 if (i < 0) 1950 goto err; 1951 if (i < count) 1952 continue; 1953 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); 1954 if (!__devs) 1955 goto err; 1956 memcpy(__devs, devs, sizeof(dev) * count); 1957 kfree(devs); 1958 devs = __devs; 1959 1960 dev = create_namespace_pmem(nd_region, nd_mapping, nd_label); 1961 if (IS_ERR(dev)) { 1962 switch (PTR_ERR(dev)) { 1963 case -EAGAIN: 1964 /* skip invalid labels */ 1965 continue; 1966 case -ENODEV: 1967 /* fallthrough to seed creation */ 1968 break; 1969 default: 1970 goto err; 1971 } 1972 } else 1973 devs[count++] = dev; 1974 1975 } 1976 1977 dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count, 1978 count == 1 ? "" : "s"); 1979 1980 if (count == 0) { 1981 struct nd_namespace_pmem *nspm; 1982 1983 /* Publish a zero-sized namespace for userspace to configure. */ 1984 nd_mapping_free_labels(nd_mapping); 1985 1986 devs = kcalloc(2, sizeof(dev), GFP_KERNEL); 1987 if (!devs) 1988 goto err; 1989 1990 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); 1991 if (!nspm) 1992 goto err; 1993 dev = &nspm->nsio.common.dev; 1994 dev->type = &namespace_pmem_device_type; 1995 nd_namespace_pmem_set_resource(nd_region, nspm, 0); 1996 dev->parent = &nd_region->dev; 1997 devs[count++] = dev; 1998 } else if (is_memory(&nd_region->dev)) { 1999 /* clean unselected labels */ 2000 for (i = 0; i < nd_region->ndr_mappings; i++) { 2001 struct list_head *l, *e; 2002 LIST_HEAD(list); 2003 int j; 2004 2005 nd_mapping = &nd_region->mapping[i]; 2006 if (list_empty(&nd_mapping->labels)) { 2007 WARN_ON(1); 2008 continue; 2009 } 2010 2011 j = count; 2012 list_for_each_safe(l, e, &nd_mapping->labels) { 2013 if (!j--) 2014 break; 2015 list_move_tail(l, &list); 2016 } 2017 nd_mapping_free_labels(nd_mapping); 2018 list_splice_init(&list, &nd_mapping->labels); 2019 } 2020 } 2021 2022 if (count > 1) 2023 sort(devs, count, sizeof(struct device *), cmp_dpa, NULL); 2024 2025 return devs; 2026 2027 err: 2028 if (devs) { 2029 for (i = 0; devs[i]; i++) 2030 namespace_pmem_release(devs[i]); 2031 kfree(devs); 2032 } 2033 return NULL; 2034 } 2035 2036 static struct device **create_namespaces(struct nd_region *nd_region) 2037 { 2038 struct nd_mapping *nd_mapping; 2039 struct device **devs; 2040 int i; 2041 2042 if (nd_region->ndr_mappings == 0) 2043 return NULL; 2044 2045 /* lock down all mappings while we scan labels */ 2046 for (i = 0; i < nd_region->ndr_mappings; i++) { 2047 nd_mapping = &nd_region->mapping[i]; 2048 mutex_lock_nested(&nd_mapping->lock, i); 2049 } 2050 2051 devs = scan_labels(nd_region); 2052 2053 for (i = 0; i < nd_region->ndr_mappings; i++) { 2054 int reverse = nd_region->ndr_mappings - 1 - i; 2055 2056 nd_mapping = &nd_region->mapping[reverse]; 2057 mutex_unlock(&nd_mapping->lock); 2058 } 2059 2060 return devs; 2061 } 2062 2063 static void deactivate_labels(void *region) 2064 { 2065 struct nd_region *nd_region = region; 2066 int i; 2067 2068 for (i = 0; i < nd_region->ndr_mappings; i++) { 2069 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 2070 struct nvdimm_drvdata *ndd = nd_mapping->ndd; 2071 struct nvdimm *nvdimm = nd_mapping->nvdimm; 2072 2073 mutex_lock(&nd_mapping->lock); 2074 nd_mapping_free_labels(nd_mapping); 2075 mutex_unlock(&nd_mapping->lock); 2076 2077 put_ndd(ndd); 2078 nd_mapping->ndd = NULL; 2079 if (ndd) 2080 atomic_dec(&nvdimm->busy); 2081 } 2082 } 2083 2084 static int init_active_labels(struct nd_region *nd_region) 2085 { 2086 int i, rc = 0; 2087 2088 for (i = 0; i < nd_region->ndr_mappings; i++) { 2089 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 2090 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 2091 struct nvdimm *nvdimm = nd_mapping->nvdimm; 2092 struct nd_label_ent *label_ent; 2093 int count, j; 2094 2095 /* 2096 * If the dimm is disabled then we may need to prevent 2097 * the region from being activated. 2098 */ 2099 if (!ndd) { 2100 if (test_bit(NDD_LOCKED, &nvdimm->flags)) 2101 /* fail, label data may be unreadable */; 2102 else if (test_bit(NDD_LABELING, &nvdimm->flags)) 2103 /* fail, labels needed to disambiguate dpa */; 2104 else 2105 continue; 2106 2107 dev_err(&nd_region->dev, "%s: is %s, failing probe\n", 2108 dev_name(&nd_mapping->nvdimm->dev), 2109 test_bit(NDD_LOCKED, &nvdimm->flags) 2110 ? "locked" : "disabled"); 2111 rc = -ENXIO; 2112 goto out; 2113 } 2114 nd_mapping->ndd = ndd; 2115 atomic_inc(&nvdimm->busy); 2116 get_ndd(ndd); 2117 2118 count = nd_label_active_count(ndd); 2119 dev_dbg(ndd->dev, "count: %d\n", count); 2120 if (!count) 2121 continue; 2122 for (j = 0; j < count; j++) { 2123 struct nd_namespace_label *label; 2124 2125 label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL); 2126 if (!label_ent) 2127 break; 2128 label = nd_label_active(ndd, j); 2129 label_ent->label = label; 2130 2131 mutex_lock(&nd_mapping->lock); 2132 list_add_tail(&label_ent->list, &nd_mapping->labels); 2133 mutex_unlock(&nd_mapping->lock); 2134 } 2135 2136 if (j < count) 2137 break; 2138 } 2139 2140 if (i < nd_region->ndr_mappings) 2141 rc = -ENOMEM; 2142 2143 out: 2144 if (rc) { 2145 deactivate_labels(nd_region); 2146 return rc; 2147 } 2148 2149 return devm_add_action_or_reset(&nd_region->dev, deactivate_labels, 2150 nd_region); 2151 } 2152 2153 int nd_region_register_namespaces(struct nd_region *nd_region, int *err) 2154 { 2155 struct device **devs = NULL; 2156 int i, rc = 0, type; 2157 2158 *err = 0; 2159 nvdimm_bus_lock(&nd_region->dev); 2160 rc = init_active_labels(nd_region); 2161 if (rc) { 2162 nvdimm_bus_unlock(&nd_region->dev); 2163 return rc; 2164 } 2165 2166 type = nd_region_to_nstype(nd_region); 2167 switch (type) { 2168 case ND_DEVICE_NAMESPACE_IO: 2169 devs = create_namespace_io(nd_region); 2170 break; 2171 case ND_DEVICE_NAMESPACE_PMEM: 2172 devs = create_namespaces(nd_region); 2173 break; 2174 default: 2175 break; 2176 } 2177 nvdimm_bus_unlock(&nd_region->dev); 2178 2179 if (!devs) 2180 return -ENODEV; 2181 2182 for (i = 0; devs[i]; i++) { 2183 struct device *dev = devs[i]; 2184 int id; 2185 2186 if (type == ND_DEVICE_NAMESPACE_PMEM) { 2187 struct nd_namespace_pmem *nspm; 2188 2189 nspm = to_nd_namespace_pmem(dev); 2190 id = ida_simple_get(&nd_region->ns_ida, 0, 0, 2191 GFP_KERNEL); 2192 nspm->id = id; 2193 } else 2194 id = i; 2195 2196 if (id < 0) 2197 break; 2198 dev_set_name(dev, "namespace%d.%d", nd_region->id, id); 2199 device_initialize(dev); 2200 lockdep_set_class(&dev->mutex, &nvdimm_namespace_key); 2201 nd_device_register(dev); 2202 } 2203 if (i) 2204 nd_region->ns_seed = devs[0]; 2205 2206 if (devs[i]) { 2207 int j; 2208 2209 for (j = i; devs[j]; j++) { 2210 struct device *dev = devs[j]; 2211 2212 device_initialize(dev); 2213 put_device(dev); 2214 } 2215 *err = j - i; 2216 /* 2217 * All of the namespaces we tried to register failed, so 2218 * fail region activation. 2219 */ 2220 if (*err == 0) 2221 rc = -ENODEV; 2222 } 2223 kfree(devs); 2224 2225 if (rc == -ENODEV) 2226 return rc; 2227 2228 return i; 2229 } 2230