/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nspm->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nspm->id);
	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}

static const struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
};

static const struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
};

static const struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
};

static bool is_namespace_pmem(const struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(const struct device *dev)
{
	return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(const struct device *dev)
{
	return dev ?
dev->type == &namespace_io_device_type : false; 83 } 84 85 static int is_uuid_busy(struct device *dev, void *data) 86 { 87 u8 *uuid1 = data, *uuid2 = NULL; 88 89 if (is_namespace_pmem(dev)) { 90 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 91 92 uuid2 = nspm->uuid; 93 } else if (is_namespace_blk(dev)) { 94 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); 95 96 uuid2 = nsblk->uuid; 97 } else if (is_nd_btt(dev)) { 98 struct nd_btt *nd_btt = to_nd_btt(dev); 99 100 uuid2 = nd_btt->uuid; 101 } else if (is_nd_pfn(dev)) { 102 struct nd_pfn *nd_pfn = to_nd_pfn(dev); 103 104 uuid2 = nd_pfn->uuid; 105 } 106 107 if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0) 108 return -EBUSY; 109 110 return 0; 111 } 112 113 static int is_namespace_uuid_busy(struct device *dev, void *data) 114 { 115 if (is_nd_pmem(dev) || is_nd_blk(dev)) 116 return device_for_each_child(dev, data, is_uuid_busy); 117 return 0; 118 } 119 120 /** 121 * nd_is_uuid_unique - verify that no other namespace has @uuid 122 * @dev: any device on a nvdimm_bus 123 * @uuid: uuid to check 124 */ 125 bool nd_is_uuid_unique(struct device *dev, u8 *uuid) 126 { 127 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); 128 129 if (!nvdimm_bus) 130 return false; 131 WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev)); 132 if (device_for_each_child(&nvdimm_bus->dev, uuid, 133 is_namespace_uuid_busy) != 0) 134 return false; 135 return true; 136 } 137 138 bool pmem_should_map_pages(struct device *dev) 139 { 140 struct nd_region *nd_region = to_nd_region(dev->parent); 141 struct nd_namespace_io *nsio; 142 143 if (!IS_ENABLED(CONFIG_ZONE_DEVICE)) 144 return false; 145 146 if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags)) 147 return false; 148 149 if (is_nd_pfn(dev) || is_nd_btt(dev)) 150 return false; 151 152 nsio = to_nd_namespace_io(dev); 153 if (region_intersects(nsio->res.start, resource_size(&nsio->res), 154 IORESOURCE_SYSTEM_RAM, 155 IORES_DESC_NONE) == REGION_MIXED) 156 return false; 157 158 #ifdef ARCH_MEMREMAP_PMEM 159 return ARCH_MEMREMAP_PMEM == MEMREMAP_WB; 160 #else 161 return false; 162 #endif 163 } 164 EXPORT_SYMBOL(pmem_should_map_pages); 165 166 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, 167 char *name) 168 { 169 struct nd_region *nd_region = to_nd_region(ndns->dev.parent); 170 const char *suffix = NULL; 171 172 if (ndns->claim && is_nd_btt(ndns->claim)) 173 suffix = "s"; 174 175 if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) { 176 int nsidx = 0; 177 178 if (is_namespace_pmem(&ndns->dev)) { 179 struct nd_namespace_pmem *nspm; 180 181 nspm = to_nd_namespace_pmem(&ndns->dev); 182 nsidx = nspm->id; 183 } 184 185 if (nsidx) 186 sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx, 187 suffix ? suffix : ""); 188 else 189 sprintf(name, "pmem%d%s", nd_region->id, 190 suffix ? suffix : ""); 191 } else if (is_namespace_blk(&ndns->dev)) { 192 struct nd_namespace_blk *nsblk; 193 194 nsblk = to_nd_namespace_blk(&ndns->dev); 195 sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, 196 suffix ? 
suffix : ""); 197 } else { 198 return NULL; 199 } 200 201 return name; 202 } 203 EXPORT_SYMBOL(nvdimm_namespace_disk_name); 204 205 const u8 *nd_dev_to_uuid(struct device *dev) 206 { 207 static const u8 null_uuid[16]; 208 209 if (!dev) 210 return null_uuid; 211 212 if (is_namespace_pmem(dev)) { 213 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 214 215 return nspm->uuid; 216 } else if (is_namespace_blk(dev)) { 217 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); 218 219 return nsblk->uuid; 220 } else 221 return null_uuid; 222 } 223 EXPORT_SYMBOL(nd_dev_to_uuid); 224 225 static ssize_t nstype_show(struct device *dev, 226 struct device_attribute *attr, char *buf) 227 { 228 struct nd_region *nd_region = to_nd_region(dev->parent); 229 230 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region)); 231 } 232 static DEVICE_ATTR_RO(nstype); 233 234 static ssize_t __alt_name_store(struct device *dev, const char *buf, 235 const size_t len) 236 { 237 char *input, *pos, *alt_name, **ns_altname; 238 ssize_t rc; 239 240 if (is_namespace_pmem(dev)) { 241 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 242 243 ns_altname = &nspm->alt_name; 244 } else if (is_namespace_blk(dev)) { 245 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); 246 247 ns_altname = &nsblk->alt_name; 248 } else 249 return -ENXIO; 250 251 if (dev->driver || to_ndns(dev)->claim) 252 return -EBUSY; 253 254 input = kmemdup(buf, len + 1, GFP_KERNEL); 255 if (!input) 256 return -ENOMEM; 257 258 input[len] = '\0'; 259 pos = strim(input); 260 if (strlen(pos) + 1 > NSLABEL_NAME_LEN) { 261 rc = -EINVAL; 262 goto out; 263 } 264 265 alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL); 266 if (!alt_name) { 267 rc = -ENOMEM; 268 goto out; 269 } 270 kfree(*ns_altname); 271 *ns_altname = alt_name; 272 sprintf(*ns_altname, "%s", pos); 273 rc = len; 274 275 out: 276 kfree(input); 277 return rc; 278 } 279 280 static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk) 281 { 282 struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent); 283 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 284 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 285 struct nd_label_id label_id; 286 resource_size_t size = 0; 287 struct resource *res; 288 289 if (!nsblk->uuid) 290 return 0; 291 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); 292 for_each_dpa_resource(ndd, res) 293 if (strcmp(res->name, label_id.id) == 0) 294 size += resource_size(res); 295 return size; 296 } 297 298 static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk) 299 { 300 struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent); 301 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 302 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 303 struct nd_label_id label_id; 304 struct resource *res; 305 int count, i; 306 307 if (!nsblk->uuid || !nsblk->lbasize || !ndd) 308 return false; 309 310 count = 0; 311 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); 312 for_each_dpa_resource(ndd, res) { 313 if (strcmp(res->name, label_id.id) != 0) 314 continue; 315 /* 316 * Resources with unacknowledged adjustments indicate a 317 * failure to update labels 318 */ 319 if (res->flags & DPA_RESOURCE_ADJUSTED) 320 return false; 321 count++; 322 } 323 324 /* These values match after a successful label update */ 325 if (count != nsblk->num_resources) 326 return false; 327 328 for (i = 0; i < nsblk->num_resources; i++) { 329 struct resource *found = NULL; 330 331 for_each_dpa_resource(ndd, 
res) 332 if (res == nsblk->res[i]) { 333 found = res; 334 break; 335 } 336 /* stale resource */ 337 if (!found) 338 return false; 339 } 340 341 return true; 342 } 343 344 resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk) 345 { 346 resource_size_t size; 347 348 nvdimm_bus_lock(&nsblk->common.dev); 349 size = __nd_namespace_blk_validate(nsblk); 350 nvdimm_bus_unlock(&nsblk->common.dev); 351 352 return size; 353 } 354 EXPORT_SYMBOL(nd_namespace_blk_validate); 355 356 357 static int nd_namespace_label_update(struct nd_region *nd_region, 358 struct device *dev) 359 { 360 dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim, 361 "namespace must be idle during label update\n"); 362 if (dev->driver || to_ndns(dev)->claim) 363 return 0; 364 365 /* 366 * Only allow label writes that will result in a valid namespace 367 * or deletion of an existing namespace. 368 */ 369 if (is_namespace_pmem(dev)) { 370 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 371 resource_size_t size = resource_size(&nspm->nsio.res); 372 373 if (size == 0 && nspm->uuid) 374 /* delete allocation */; 375 else if (!nspm->uuid) 376 return 0; 377 378 return nd_pmem_namespace_label_update(nd_region, nspm, size); 379 } else if (is_namespace_blk(dev)) { 380 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); 381 resource_size_t size = nd_namespace_blk_size(nsblk); 382 383 if (size == 0 && nsblk->uuid) 384 /* delete allocation */; 385 else if (!nsblk->uuid || !nsblk->lbasize) 386 return 0; 387 388 return nd_blk_namespace_label_update(nd_region, nsblk, size); 389 } else 390 return -ENXIO; 391 } 392 393 static ssize_t alt_name_store(struct device *dev, 394 struct device_attribute *attr, const char *buf, size_t len) 395 { 396 struct nd_region *nd_region = to_nd_region(dev->parent); 397 ssize_t rc; 398 399 device_lock(dev); 400 nvdimm_bus_lock(dev); 401 wait_nvdimm_bus_probe_idle(dev); 402 rc = __alt_name_store(dev, buf, len); 403 if (rc >= 0) 404 rc = nd_namespace_label_update(nd_region, dev); 405 dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc); 406 nvdimm_bus_unlock(dev); 407 device_unlock(dev); 408 409 return rc < 0 ? rc : len; 410 } 411 412 static ssize_t alt_name_show(struct device *dev, 413 struct device_attribute *attr, char *buf) 414 { 415 char *ns_altname; 416 417 if (is_namespace_pmem(dev)) { 418 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 419 420 ns_altname = nspm->alt_name; 421 } else if (is_namespace_blk(dev)) { 422 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); 423 424 ns_altname = nsblk->alt_name; 425 } else 426 return -ENXIO; 427 428 return sprintf(buf, "%s\n", ns_altname ? 
			ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	if (is_blk)
		first_dpa = nd_mapping->start + nd_mapping->size - n;
	else
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}

/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region.
 * PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one
 * exists).  If reserving PMEM, any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, struct resource *prev,
		struct resource *next, struct resource *exist,
		resource_size_t n, struct resource *valid)
{
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;

	if (valid->start >= valid->end)
		goto invalid;

	if (is_reserve)
		return;

	if (!is_pmem) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_bus *nvdimm_bus;
		struct blk_alloc_info info = {
			.nd_mapping = nd_mapping,
			.available = nd_mapping->size,
			.res = valid,
		};

		WARN_ON(!is_nd_blk(&nd_region->dev));
		nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
		device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
		return;
	}

	/* allocation needs to be contiguous, so this is all or nothing */
	if (resource_size(valid) < n)
		goto invalid;

	/* we've got all the space we need and no existing allocation */
	if (!exist)
		return;

	/* allocation needs to be contiguous with the existing namespace */
	if (valid->start == exist->end + 1
			|| valid->end == exist->start - 1)
		return;

 invalid:
	/* truncate @valid size to 0 */
	valid->end = valid->start - 1;
}

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *exist = NULL, valid;
	const resource_size_t to_allocate = n;
	int first;

	for_each_dpa_resource(ndd, res)
		if (strcmp(label_id->id, res->name) == 0)
			exist = res;

	valid.start = nd_mapping->start;
	valid.end = mapping_end;
	valid.name = "free space";
 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling, *new_res = NULL;
		resource_size_t allocate, available = 0;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			valid.start = nd_mapping->start;
			valid.end = res->start - 1;
			space_valid(nd_region, ndd, label_id, NULL, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			valid.start = res->start + resource_size(res);
			valid.end = min(mapping_end, next->start - 1);
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_MID;
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			valid.start = res->start + resource_size(res);
			valid.end = mapping_end;
			space_valid(nd_region,
					ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_AFTER;
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				rc = adjust_resource(res, res->start - allocate,
					resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				valid.start += available - allocate;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					valid.start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass". Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
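	 * (For BLK, grow_dpa_allocation() drives this scan twice: first
	 * with all unallocated PMEM capacity pinned by temporary
	 * "pmem-reserve" resources, then again with those reservations
	 * released so BLK can encroach into PMEM-aliased space.)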
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}

static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}

static int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_nd_pmem(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;
	int rc;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
	if (rc)
		release_free_pmem(nvdimm_bus, nd_mapping);
	return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
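 *
 * Returns 0 when every mapping satisfied the full @n bytes, otherwise
 * a negative errno (an allocation underrun on any mapping fails the
 * whole request with -ENXIO).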
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}

static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;

	if (size && !nspm->uuid) {
		WARN_ON_ONCE(1);
		size = 0;
	}

	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res;

		if (!ndd) {
			size = 0;
			goto out;
		}

		nd_label_gen_id(&label_id, nspm->uuid, 0);

		/* calculate a spa offset from the dpa allocation offset */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}

		WARN_ON_ONCE(1);
		size = 0;
	}

 out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}

static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	int rc, i, id = -1;
	u8 *uuid = NULL;

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		id = nspm->id;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
		id = nsblk->id;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
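	 * Below that, the requested size must be a multiple of
	 * SZ_4K * ndr_mappings (it is split evenly across the mappings)
	 * and must not exceed the available plus already-allocated
	 * capacity for this label_id.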
987 */ 988 if (uuid_not_set(uuid, dev, __func__)) 989 return -ENXIO; 990 if (nd_region->ndr_mappings == 0) { 991 dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__); 992 return -ENXIO; 993 } 994 995 div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder); 996 if (remainder) { 997 dev_dbg(dev, "%llu is not %dK aligned\n", val, 998 (SZ_4K * nd_region->ndr_mappings) / SZ_1K); 999 return -EINVAL; 1000 } 1001 1002 nd_label_gen_id(&label_id, uuid, flags); 1003 for (i = 0; i < nd_region->ndr_mappings; i++) { 1004 nd_mapping = &nd_region->mapping[i]; 1005 ndd = to_ndd(nd_mapping); 1006 1007 /* 1008 * All dimms in an interleave set, or the base dimm for a blk 1009 * region, need to be enabled for the size to be changed. 1010 */ 1011 if (!ndd) 1012 return -ENXIO; 1013 1014 allocated += nvdimm_allocated_dpa(ndd, &label_id); 1015 } 1016 available = nd_region_available_dpa(nd_region); 1017 1018 if (val > available + allocated) 1019 return -ENOSPC; 1020 1021 if (val == allocated) 1022 return 0; 1023 1024 val = div_u64(val, nd_region->ndr_mappings); 1025 allocated = div_u64(allocated, nd_region->ndr_mappings); 1026 if (val < allocated) 1027 rc = shrink_dpa_allocation(nd_region, &label_id, 1028 allocated - val); 1029 else 1030 rc = grow_dpa_allocation(nd_region, &label_id, val - allocated); 1031 1032 if (rc) 1033 return rc; 1034 1035 if (is_namespace_pmem(dev)) { 1036 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1037 1038 nd_namespace_pmem_set_resource(nd_region, nspm, 1039 val * nd_region->ndr_mappings); 1040 } 1041 1042 /* 1043 * Try to delete the namespace if we deleted all of its 1044 * allocation, this is not the seed or 0th device for the 1045 * region, and it is not actively claimed by a btt, pfn, or dax 1046 * instance. 1047 */ 1048 if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim) 1049 nd_device_unregister(dev, ND_ASYNC); 1050 1051 return rc; 1052 } 1053 1054 static ssize_t size_store(struct device *dev, 1055 struct device_attribute *attr, const char *buf, size_t len) 1056 { 1057 struct nd_region *nd_region = to_nd_region(dev->parent); 1058 unsigned long long val; 1059 u8 **uuid = NULL; 1060 int rc; 1061 1062 rc = kstrtoull(buf, 0, &val); 1063 if (rc) 1064 return rc; 1065 1066 device_lock(dev); 1067 nvdimm_bus_lock(dev); 1068 wait_nvdimm_bus_probe_idle(dev); 1069 rc = __size_store(dev, val); 1070 if (rc >= 0) 1071 rc = nd_namespace_label_update(nd_region, dev); 1072 1073 if (is_namespace_pmem(dev)) { 1074 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1075 1076 uuid = &nspm->uuid; 1077 } else if (is_namespace_blk(dev)) { 1078 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); 1079 1080 uuid = &nsblk->uuid; 1081 } 1082 1083 if (rc == 0 && val == 0 && uuid) { 1084 /* setting size zero == 'delete namespace' */ 1085 kfree(*uuid); 1086 *uuid = NULL; 1087 } 1088 1089 dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0 1090 ? "fail" : "success", rc); 1091 1092 nvdimm_bus_unlock(dev); 1093 device_unlock(dev); 1094 1095 return rc < 0 ? 
			rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);

static u8 *namespace_to_uuid(struct device *dev)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 *uuid = namespace_to_uuid(dev);

	if (IS_ERR(uuid))
		return PTR_ERR(uuid);
	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace.  Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
1199 */ 1200 if (list_empty(&nd_mapping->labels)) 1201 return -EBUSY; 1202 } 1203 1204 nd_label_gen_id(&old_label_id, *old_uuid, flags); 1205 nd_label_gen_id(&new_label_id, new_uuid, flags); 1206 for (i = 0; i < nd_region->ndr_mappings; i++) { 1207 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 1208 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1209 struct resource *res; 1210 1211 for_each_dpa_resource(ndd, res) 1212 if (strcmp(res->name, old_label_id.id) == 0) 1213 sprintf((void *) res->name, "%s", 1214 new_label_id.id); 1215 } 1216 kfree(*old_uuid); 1217 out: 1218 *old_uuid = new_uuid; 1219 return 0; 1220 } 1221 1222 static ssize_t uuid_store(struct device *dev, 1223 struct device_attribute *attr, const char *buf, size_t len) 1224 { 1225 struct nd_region *nd_region = to_nd_region(dev->parent); 1226 u8 *uuid = NULL; 1227 ssize_t rc = 0; 1228 u8 **ns_uuid; 1229 1230 if (is_namespace_pmem(dev)) { 1231 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1232 1233 ns_uuid = &nspm->uuid; 1234 } else if (is_namespace_blk(dev)) { 1235 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); 1236 1237 ns_uuid = &nsblk->uuid; 1238 } else 1239 return -ENXIO; 1240 1241 device_lock(dev); 1242 nvdimm_bus_lock(dev); 1243 wait_nvdimm_bus_probe_idle(dev); 1244 if (to_ndns(dev)->claim) 1245 rc = -EBUSY; 1246 if (rc >= 0) 1247 rc = nd_uuid_store(dev, &uuid, buf, len); 1248 if (rc >= 0) 1249 rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid); 1250 if (rc >= 0) 1251 rc = nd_namespace_label_update(nd_region, dev); 1252 else 1253 kfree(uuid); 1254 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 1255 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 1256 nvdimm_bus_unlock(dev); 1257 device_unlock(dev); 1258 1259 return rc < 0 ? rc : len; 1260 } 1261 static DEVICE_ATTR_RW(uuid); 1262 1263 static ssize_t resource_show(struct device *dev, 1264 struct device_attribute *attr, char *buf) 1265 { 1266 struct resource *res; 1267 1268 if (is_namespace_pmem(dev)) { 1269 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1270 1271 res = &nspm->nsio.res; 1272 } else if (is_namespace_io(dev)) { 1273 struct nd_namespace_io *nsio = to_nd_namespace_io(dev); 1274 1275 res = &nsio->res; 1276 } else 1277 return -ENXIO; 1278 1279 /* no address to convey if the namespace has no allocation */ 1280 if (resource_size(res) == 0) 1281 return -ENXIO; 1282 return sprintf(buf, "%#llx\n", (unsigned long long) res->start); 1283 } 1284 static DEVICE_ATTR_RO(resource); 1285 1286 static const unsigned long ns_lbasize_supported[] = { 512, 520, 528, 1287 4096, 4104, 4160, 4224, 0 }; 1288 1289 static ssize_t sector_size_show(struct device *dev, 1290 struct device_attribute *attr, char *buf) 1291 { 1292 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); 1293 1294 if (!is_namespace_blk(dev)) 1295 return -ENXIO; 1296 1297 return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf); 1298 } 1299 1300 static ssize_t sector_size_store(struct device *dev, 1301 struct device_attribute *attr, const char *buf, size_t len) 1302 { 1303 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); 1304 struct nd_region *nd_region = to_nd_region(dev->parent); 1305 ssize_t rc = 0; 1306 1307 if (!is_namespace_blk(dev)) 1308 return -ENXIO; 1309 1310 device_lock(dev); 1311 nvdimm_bus_lock(dev); 1312 if (to_ndns(dev)->claim) 1313 rc = -EBUSY; 1314 if (rc >= 0) 1315 rc = nd_sector_size_store(dev, buf, &nsblk->lbasize, 1316 ns_lbasize_supported); 1317 if (rc >= 0) 1318 rc = nd_namespace_label_update(nd_region, dev); 
	dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
			rc, rc < 0 ? "tried" : "wrote", buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	int count = 0, i;
	u8 *uuid = NULL;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct device *claim;
	char *mode;
	ssize_t rc;

	device_lock(dev);
	claim = ndns->claim;
	if (claim && is_nd_btt(claim))
		mode = "safe";
	else if (claim && is_nd_pfn(claim))
		mode = "memory";
	else if (claim && is_nd_dax(claim))
		mode = "dax";
	else if (!claim && pmem_should_map_pages(dev))
		mode = "memory";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

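	/*
	 * blk namespaces map discontiguous DPA extents, so there is no
	 * single base address to publish via the 'resource' attribute.
	 */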
	if (a == &dev_attr_resource.attr) {
		if (is_namespace_blk(dev))
			return 0;
		return a->mode;
	}

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return 0644;

		if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
			return 0;

		return a->mode;
	}

	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_force_raw.attr
			|| a == &dev_attr_mode.attr)
		return a->mode;

	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	resource_size_t size;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		device_lock(&ndns->dev);
		device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
					dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
		if (!nsblk->lbasize) {
			dev_dbg(&ndns->dev, "%s: sector size not set\n",
					__func__);
			return ERR_PTR(-ENODEV);
		}
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;

	devs = kcalloc(2, sizeof(struct device *),
GFP_KERNEL); 1576 if (!devs) { 1577 kfree(nsio); 1578 return NULL; 1579 } 1580 1581 dev = &nsio->common.dev; 1582 dev->type = &namespace_io_device_type; 1583 dev->parent = &nd_region->dev; 1584 res = &nsio->res; 1585 res->name = dev_name(&nd_region->dev); 1586 res->flags = IORESOURCE_MEM; 1587 res->start = nd_region->ndr_start; 1588 res->end = res->start + nd_region->ndr_size - 1; 1589 1590 devs[0] = dev; 1591 return devs; 1592 } 1593 1594 static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid, 1595 u64 cookie, u16 pos) 1596 { 1597 struct nd_namespace_label *found = NULL; 1598 int i; 1599 1600 for (i = 0; i < nd_region->ndr_mappings; i++) { 1601 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 1602 struct nd_label_ent *label_ent; 1603 bool found_uuid = false; 1604 1605 list_for_each_entry(label_ent, &nd_mapping->labels, list) { 1606 struct nd_namespace_label *nd_label = label_ent->label; 1607 u16 position, nlabel; 1608 u64 isetcookie; 1609 1610 if (!nd_label) 1611 continue; 1612 isetcookie = __le64_to_cpu(nd_label->isetcookie); 1613 position = __le16_to_cpu(nd_label->position); 1614 nlabel = __le16_to_cpu(nd_label->nlabel); 1615 1616 if (isetcookie != cookie) 1617 continue; 1618 1619 if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0) 1620 continue; 1621 1622 if (found_uuid) { 1623 dev_dbg(to_ndd(nd_mapping)->dev, 1624 "%s duplicate entry for uuid\n", 1625 __func__); 1626 return false; 1627 } 1628 found_uuid = true; 1629 if (nlabel != nd_region->ndr_mappings) 1630 continue; 1631 if (position != pos) 1632 continue; 1633 found = nd_label; 1634 break; 1635 } 1636 if (found) 1637 break; 1638 } 1639 return found != NULL; 1640 } 1641 1642 static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) 1643 { 1644 int i; 1645 1646 if (!pmem_id) 1647 return -ENODEV; 1648 1649 for (i = 0; i < nd_region->ndr_mappings; i++) { 1650 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 1651 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1652 struct nd_namespace_label *nd_label = NULL; 1653 u64 hw_start, hw_end, pmem_start, pmem_end; 1654 struct nd_label_ent *label_ent; 1655 1656 lockdep_assert_held(&nd_mapping->lock); 1657 list_for_each_entry(label_ent, &nd_mapping->labels, list) { 1658 nd_label = label_ent->label; 1659 if (!nd_label) 1660 continue; 1661 if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0) 1662 break; 1663 nd_label = NULL; 1664 } 1665 1666 if (!nd_label) { 1667 WARN_ON(1); 1668 return -EINVAL; 1669 } 1670 1671 /* 1672 * Check that this label is compliant with the dpa 1673 * range published in NFIT 1674 */ 1675 hw_start = nd_mapping->start; 1676 hw_end = hw_start + nd_mapping->size; 1677 pmem_start = __le64_to_cpu(nd_label->dpa); 1678 pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize); 1679 if (pmem_start >= hw_start && pmem_start < hw_end 1680 && pmem_end <= hw_end && pmem_end > hw_start) 1681 /* pass */; 1682 else { 1683 dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n", 1684 dev_name(ndd->dev), nd_label->uuid); 1685 return -EINVAL; 1686 } 1687 1688 /* move recently validated label to the front of the list */ 1689 list_move(&label_ent->list, &nd_mapping->labels); 1690 } 1691 return 0; 1692 } 1693 1694 /** 1695 * create_namespace_pmem - validate interleave set labelling, retrieve label0 1696 * @nd_region: region with mappings to validate 1697 * @nspm: target namespace to create 1698 * @nd_label: target pmem namespace label to evaluate 1699 */ 1700 struct device *create_namespace_pmem(struct nd_region *nd_region, 1701 struct 
nd_namespace_label *nd_label) 1702 { 1703 u64 altcookie = nd_region_interleave_set_altcookie(nd_region); 1704 u64 cookie = nd_region_interleave_set_cookie(nd_region); 1705 struct nd_label_ent *label_ent; 1706 struct nd_namespace_pmem *nspm; 1707 struct nd_mapping *nd_mapping; 1708 resource_size_t size = 0; 1709 struct resource *res; 1710 struct device *dev; 1711 int rc = 0; 1712 u16 i; 1713 1714 if (cookie == 0) { 1715 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n"); 1716 return ERR_PTR(-ENXIO); 1717 } 1718 1719 if (__le64_to_cpu(nd_label->isetcookie) != cookie) { 1720 dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n", 1721 nd_label->uuid); 1722 if (__le64_to_cpu(nd_label->isetcookie) != altcookie) 1723 return ERR_PTR(-EAGAIN); 1724 1725 dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n", 1726 nd_label->uuid); 1727 } 1728 1729 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); 1730 if (!nspm) 1731 return ERR_PTR(-ENOMEM); 1732 1733 nspm->id = -1; 1734 dev = &nspm->nsio.common.dev; 1735 dev->type = &namespace_pmem_device_type; 1736 dev->parent = &nd_region->dev; 1737 res = &nspm->nsio.res; 1738 res->name = dev_name(&nd_region->dev); 1739 res->flags = IORESOURCE_MEM; 1740 1741 for (i = 0; i < nd_region->ndr_mappings; i++) { 1742 if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i)) 1743 continue; 1744 if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i)) 1745 continue; 1746 break; 1747 } 1748 1749 if (i < nd_region->ndr_mappings) { 1750 struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]); 1751 1752 /* 1753 * Give up if we don't find an instance of a uuid at each 1754 * position (from 0 to nd_region->ndr_mappings - 1), or if we 1755 * find a dimm with two instances of the same uuid. 1756 */ 1757 dev_err(&nd_region->dev, "%s missing label for %pUb\n", 1758 dev_name(ndd->dev), nd_label->uuid); 1759 rc = -EINVAL; 1760 goto err; 1761 } 1762 1763 /* 1764 * Fix up each mapping's 'labels' to have the validated pmem label for 1765 * that position at labels[0], and NULL at labels[1]. In the process, 1766 * check that the namespace aligns with interleave-set. We know 1767 * that it does not overlap with any blk namespaces by virtue of 1768 * the dimm being enabled (i.e. nd_label_reserve_dpa() 1769 * succeeded). 1770 */ 1771 rc = select_pmem_id(nd_region, nd_label->uuid); 1772 if (rc) 1773 goto err; 1774 1775 /* Calculate total size and populate namespace properties from label0 */ 1776 for (i = 0; i < nd_region->ndr_mappings; i++) { 1777 struct nd_namespace_label *label0; 1778 1779 nd_mapping = &nd_region->mapping[i]; 1780 label_ent = list_first_entry_or_null(&nd_mapping->labels, 1781 typeof(*label_ent), list); 1782 label0 = label_ent ? 
label_ent->label : 0; 1783 1784 if (!label0) { 1785 WARN_ON(1); 1786 continue; 1787 } 1788 1789 size += __le64_to_cpu(label0->rawsize); 1790 if (__le16_to_cpu(label0->position) != 0) 1791 continue; 1792 WARN_ON(nspm->alt_name || nspm->uuid); 1793 nspm->alt_name = kmemdup((void __force *) label0->name, 1794 NSLABEL_NAME_LEN, GFP_KERNEL); 1795 nspm->uuid = kmemdup((void __force *) label0->uuid, 1796 NSLABEL_UUID_LEN, GFP_KERNEL); 1797 } 1798 1799 if (!nspm->alt_name || !nspm->uuid) { 1800 rc = -ENOMEM; 1801 goto err; 1802 } 1803 1804 nd_namespace_pmem_set_resource(nd_region, nspm, size); 1805 1806 return dev; 1807 err: 1808 namespace_pmem_release(dev); 1809 switch (rc) { 1810 case -EINVAL: 1811 dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__); 1812 break; 1813 case -ENODEV: 1814 dev_dbg(&nd_region->dev, "%s: label not found\n", __func__); 1815 break; 1816 default: 1817 dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n", 1818 __func__, rc); 1819 break; 1820 } 1821 return ERR_PTR(rc); 1822 } 1823 1824 struct resource *nsblk_add_resource(struct nd_region *nd_region, 1825 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk, 1826 resource_size_t start) 1827 { 1828 struct nd_label_id label_id; 1829 struct resource *res; 1830 1831 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); 1832 res = krealloc(nsblk->res, 1833 sizeof(void *) * (nsblk->num_resources + 1), 1834 GFP_KERNEL); 1835 if (!res) 1836 return NULL; 1837 nsblk->res = (struct resource **) res; 1838 for_each_dpa_resource(ndd, res) 1839 if (strcmp(res->name, label_id.id) == 0 1840 && res->start == start) { 1841 nsblk->res[nsblk->num_resources++] = res; 1842 return res; 1843 } 1844 return NULL; 1845 } 1846 1847 static struct device *nd_namespace_blk_create(struct nd_region *nd_region) 1848 { 1849 struct nd_namespace_blk *nsblk; 1850 struct device *dev; 1851 1852 if (!is_nd_blk(&nd_region->dev)) 1853 return NULL; 1854 1855 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); 1856 if (!nsblk) 1857 return NULL; 1858 1859 dev = &nsblk->common.dev; 1860 dev->type = &namespace_blk_device_type; 1861 nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL); 1862 if (nsblk->id < 0) { 1863 kfree(nsblk); 1864 return NULL; 1865 } 1866 dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id); 1867 dev->parent = &nd_region->dev; 1868 dev->groups = nd_namespace_attribute_groups; 1869 1870 return &nsblk->common.dev; 1871 } 1872 1873 static struct device *nd_namespace_pmem_create(struct nd_region *nd_region) 1874 { 1875 struct nd_namespace_pmem *nspm; 1876 struct resource *res; 1877 struct device *dev; 1878 1879 if (!is_nd_pmem(&nd_region->dev)) 1880 return NULL; 1881 1882 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); 1883 if (!nspm) 1884 return NULL; 1885 1886 dev = &nspm->nsio.common.dev; 1887 dev->type = &namespace_pmem_device_type; 1888 dev->parent = &nd_region->dev; 1889 res = &nspm->nsio.res; 1890 res->name = dev_name(&nd_region->dev); 1891 res->flags = IORESOURCE_MEM; 1892 1893 nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL); 1894 if (nspm->id < 0) { 1895 kfree(nspm); 1896 return NULL; 1897 } 1898 dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id); 1899 dev->parent = &nd_region->dev; 1900 dev->groups = nd_namespace_attribute_groups; 1901 nd_namespace_pmem_set_resource(nd_region, nspm, 0); 1902 1903 return dev; 1904 } 1905 1906 void nd_region_create_ns_seed(struct nd_region *nd_region) 1907 { 1908 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); 1909 1910 if (nd_region_to_nstype(nd_region) == 
ND_DEVICE_NAMESPACE_IO) 1911 return; 1912 1913 if (is_nd_blk(&nd_region->dev)) 1914 nd_region->ns_seed = nd_namespace_blk_create(nd_region); 1915 else 1916 nd_region->ns_seed = nd_namespace_pmem_create(nd_region); 1917 1918 /* 1919 * Seed creation failures are not fatal, provisioning is simply 1920 * disabled until memory becomes available 1921 */ 1922 if (!nd_region->ns_seed) 1923 dev_err(&nd_region->dev, "failed to create %s namespace\n", 1924 is_nd_blk(&nd_region->dev) ? "blk" : "pmem"); 1925 else 1926 nd_device_register(nd_region->ns_seed); 1927 } 1928 1929 void nd_region_create_dax_seed(struct nd_region *nd_region) 1930 { 1931 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); 1932 nd_region->dax_seed = nd_dax_create(nd_region); 1933 /* 1934 * Seed creation failures are not fatal, provisioning is simply 1935 * disabled until memory becomes available 1936 */ 1937 if (!nd_region->dax_seed) 1938 dev_err(&nd_region->dev, "failed to create dax namespace\n"); 1939 } 1940 1941 void nd_region_create_pfn_seed(struct nd_region *nd_region) 1942 { 1943 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); 1944 nd_region->pfn_seed = nd_pfn_create(nd_region); 1945 /* 1946 * Seed creation failures are not fatal, provisioning is simply 1947 * disabled until memory becomes available 1948 */ 1949 if (!nd_region->pfn_seed) 1950 dev_err(&nd_region->dev, "failed to create pfn namespace\n"); 1951 } 1952 1953 void nd_region_create_btt_seed(struct nd_region *nd_region) 1954 { 1955 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); 1956 nd_region->btt_seed = nd_btt_create(nd_region); 1957 /* 1958 * Seed creation failures are not fatal, provisioning is simply 1959 * disabled until memory becomes available 1960 */ 1961 if (!nd_region->btt_seed) 1962 dev_err(&nd_region->dev, "failed to create btt namespace\n"); 1963 } 1964 1965 static int add_namespace_resource(struct nd_region *nd_region, 1966 struct nd_namespace_label *nd_label, struct device **devs, 1967 int count) 1968 { 1969 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 1970 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1971 int i; 1972 1973 for (i = 0; i < count; i++) { 1974 u8 *uuid = namespace_to_uuid(devs[i]); 1975 struct resource *res; 1976 1977 if (IS_ERR_OR_NULL(uuid)) { 1978 WARN_ON(1); 1979 continue; 1980 } 1981 1982 if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0) 1983 continue; 1984 if (is_namespace_blk(devs[i])) { 1985 res = nsblk_add_resource(nd_region, ndd, 1986 to_nd_namespace_blk(devs[i]), 1987 __le64_to_cpu(nd_label->dpa)); 1988 if (!res) 1989 return -ENXIO; 1990 nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count); 1991 } else { 1992 dev_err(&nd_region->dev, 1993 "error: conflicting extents for uuid: %pUb\n", 1994 nd_label->uuid); 1995 return -ENXIO; 1996 } 1997 break; 1998 } 1999 2000 return i; 2001 } 2002 2003 struct device *create_namespace_blk(struct nd_region *nd_region, 2004 struct nd_namespace_label *nd_label, int count) 2005 { 2006 2007 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 2008 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 2009 struct nd_namespace_blk *nsblk; 2010 char name[NSLABEL_NAME_LEN]; 2011 struct device *dev = NULL; 2012 struct resource *res; 2013 2014 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); 2015 if (!nsblk) 2016 return ERR_PTR(-ENOMEM); 2017 dev = &nsblk->common.dev; 2018 dev->type = &namespace_blk_device_type; 2019 dev->parent = &nd_region->dev; 2020 nsblk->id = -1; 2021 nsblk->lbasize = __le64_to_cpu(nd_label->lbasize); 2022 nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, 
struct device *create_namespace_blk(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, int count)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_blk *nsblk;
	char name[NSLABEL_NAME_LEN];
	struct device *dev = NULL;
	struct resource *res;

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
	if (!nsblk)
		return ERR_PTR(-ENOMEM);
	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	dev->parent = &nd_region->dev;
	nsblk->id = -1;
	nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
	nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
			GFP_KERNEL);
	if (!nsblk->uuid)
		goto blk_err;
	memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
	if (name[0])
		nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
				GFP_KERNEL);
	res = nsblk_add_resource(nd_region, ndd, nsblk,
			__le64_to_cpu(nd_label->dpa));
	if (!res)
		goto blk_err;
	nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
	return dev;
 blk_err:
	namespace_blk_release(dev);
	return ERR_PTR(-ENXIO);
}

static int cmp_dpa(const void *a, const void *b)
{
	const struct device *dev_a = *(const struct device **) a;
	const struct device *dev_b = *(const struct device **) b;
	struct nd_namespace_blk *nsblk_a, *nsblk_b;
	struct nd_namespace_pmem *nspm_a, *nspm_b;

	if (is_namespace_io(dev_a))
		return 0;

	if (is_namespace_blk(dev_a)) {
		nsblk_a = to_nd_namespace_blk(dev_a);
		nsblk_b = to_nd_namespace_blk(dev_b);

		return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
				sizeof(resource_size_t));
	}

	nspm_a = to_nd_namespace_pmem(dev_a);
	nspm_b = to_nd_namespace_pmem(dev_b);

	return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
			sizeof(resource_size_t));
}
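
/*
 * Walk the labels attached to the first mapping of the region and
 * instantiate a namespace device per discovered namespace.  If no
 * usable labels are found, publish a single zero-sized namespace as a
 * seed for userspace configuration.  The returned NULL-terminated
 * array is sorted by starting DPA.
 */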
static struct device **scan_labels(struct nd_region *nd_region)
{
	int i, count = 0;
	struct device *dev, **devs = NULL;
	struct nd_label_ent *label_ent, *e;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;

	/* "safe" because create_namespace_pmem() might list_move() label_ent */
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;
		struct device **__devs;
		u32 flags;

		if (!nd_label)
			continue;
		flags = __le32_to_cpu(nd_label->flags);
		if (is_nd_blk(&nd_region->dev)
				== !!(flags & NSLABEL_FLAG_LOCAL))
			/* pass, region matches label type */;
		else
			continue;

		/* skip labels that describe extents outside of the region */
		if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start
				|| __le64_to_cpu(nd_label->dpa) > map_end)
			continue;

		i = add_namespace_resource(nd_region, nd_label, devs, count);
		if (i < 0)
			goto err;
		if (i < count)
			continue;
		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		if (is_nd_blk(&nd_region->dev)) {
			dev = create_namespace_blk(nd_region, nd_label, count);
			if (IS_ERR(dev))
				goto err;
			devs[count++] = dev;
		} else {
			dev = create_namespace_pmem(nd_region, nd_label);
			if (IS_ERR(dev)) {
				switch (PTR_ERR(dev)) {
				case -EAGAIN:
					/* skip invalid labels */
					continue;
				case -ENODEV:
					/* fallthrough to seed creation */
					break;
				default:
					goto err;
				}
			} else
				devs[count++] = dev;
		}
	}

	dev_dbg(&nd_region->dev, "%s: discovered %d %s namespace%s\n",
			__func__, count, is_nd_blk(&nd_region->dev)
			? "blk" : "pmem", count == 1 ? "" : "s");

	if (count == 0) {
		/* Publish a zero-sized namespace for userspace to configure. */
		nd_mapping_free_labels(nd_mapping);

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;
		if (is_nd_blk(&nd_region->dev)) {
			struct nd_namespace_blk *nsblk;

			nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
			if (!nsblk)
				goto err;
			dev = &nsblk->common.dev;
			dev->type = &namespace_blk_device_type;
		} else {
			struct nd_namespace_pmem *nspm;

			nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
			if (!nspm)
				goto err;
			dev = &nspm->nsio.common.dev;
			dev->type = &namespace_pmem_device_type;
			nd_namespace_pmem_set_resource(nd_region, nspm, 0);
		}
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	} else if (is_nd_pmem(&nd_region->dev)) {
		/* clean unselected labels */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct list_head *l, *e;
			LIST_HEAD(list);
			int j;

			nd_mapping = &nd_region->mapping[i];
			if (list_empty(&nd_mapping->labels)) {
				WARN_ON(1);
				continue;
			}

			j = count;
			list_for_each_safe(l, e, &nd_mapping->labels) {
				if (!j--)
					break;
				list_move_tail(l, &list);
			}
			nd_mapping_free_labels(nd_mapping);
			list_splice_init(&list, &nd_mapping->labels);
		}
	}

	if (count > 1)
		sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);

	return devs;

 err:
	if (devs) {
		for (i = 0; devs[i]; i++)
			if (is_nd_blk(&nd_region->dev))
				namespace_blk_release(devs[i]);
			else
				namespace_pmem_release(devs[i]);
		kfree(devs);
	}
	return NULL;
}

static struct device **create_namespaces(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct device **devs;
	int i;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	/* lock down all mappings while we scan labels */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		mutex_lock_nested(&nd_mapping->lock, i);
	}

	devs = scan_labels(nd_region);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		int reverse = nd_region->ndr_mappings - 1 - i;

		nd_mapping = &nd_region->mapping[reverse];
		mutex_unlock(&nd_mapping->lock);
	}

	return devs;
}
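
/*
 * Pin the label data for each mapping in the region: take a reference
 * on the dimm driver data, bump the nvdimm busy count, and copy every
 * active label onto the mapping's 'labels' list for scan_labels() to
 * consume.  If a dimm is disabled and its mapping aliases DPA, region
 * probing fails.
 */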
static int init_active_labels(struct nd_region *nd_region)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nd_label_ent *label_ent;
		int count, j;

		/*
		 * If the dimm is disabled then prevent the region from
		 * being activated if it aliases DPA.
		 */
		if (!ndd) {
			if ((nvdimm->flags & NDD_ALIASING) == 0)
				return 0;
			dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev));
			return -ENXIO;
		}
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
		if (!count)
			continue;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
			if (!label_ent)
				break;
			label = nd_label_active(ndd, j);
			label_ent->label = label;

			mutex_lock(&nd_mapping->lock);
			list_add_tail(&label_ent->list, &nd_mapping->labels);
			mutex_unlock(&nd_mapping->lock);
		}

		if (j >= count)
			continue;

		mutex_lock(&nd_mapping->lock);
		nd_mapping_free_labels(nd_mapping);
		mutex_unlock(&nd_mapping->lock);
		return -ENOMEM;
	}

	return 0;
}

/**
 * nd_region_register_namespaces - init and register namespace devices
 * @nd_region: region to scan for namespaces
 * @err: set to the number of namespaces that failed to register
 *
 * Returns the number of namespaces registered, or a negative error
 * code if the region's labels could not be initialized or no
 * namespace could be registered.
 */
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
	case ND_DEVICE_NAMESPACE_BLK:
		devs = create_namespaces(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_BLK) {
			struct nd_namespace_blk *nsblk;

			nsblk = to_nd_namespace_blk(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nsblk->id = id;
		} else if (type == ND_DEVICE_NAMESPACE_PMEM) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nspm->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		dev->groups = nd_namespace_attribute_groups;
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (*err == 0)
			rc = -ENODEV;
	}
	kfree(devs);

	if (rc == -ENODEV)
		return rc;

	return i;
}