// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"

static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);

static void nd_pfn_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	dev_dbg(dev, "trace\n");
	nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
	ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
	kfree(nd_pfn->uuid);
	kfree(nd_pfn);
}

struct nd_pfn *to_nd_pfn(struct device *dev)
{
	struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);

	WARN_ON(!is_nd_pfn(dev));
	return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	switch (nd_pfn->mode) {
	case PFN_MODE_RAM:
		return sprintf(buf, "ram\n");
	case PFN_MODE_PMEM:
		return sprintf(buf, "pmem\n");
	default:
		return sprintf(buf, "none\n");
	}
}

static ssize_t mode_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc = 0;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (dev->driver)
		rc = -EBUSY;
	else {
		size_t n = len - 1;

		if (strncmp(buf, "pmem\n", n) == 0
				|| strncmp(buf, "pmem", n) == 0) {
			nd_pfn->mode = PFN_MODE_PMEM;
		} else if (strncmp(buf, "ram\n", n) == 0
				|| strncmp(buf, "ram", n) == 0)
			nd_pfn->mode = PFN_MODE_RAM;
		else if (strncmp(buf, "none\n", n) == 0
				|| strncmp(buf, "none", n) == 0)
			nd_pfn->mode = PFN_MODE_NONE;
		else
			rc = -EINVAL;
	}
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	return sprintf(buf, "%ld\n", nd_pfn->align);
}

static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
{

	alignments[0] = PAGE_SIZE;

	if (has_transparent_hugepage()) {
		alignments[1] = HPAGE_PMD_SIZE;
		if (has_transparent_pud_hugepage())
			alignments[2] = HPAGE_PUD_SIZE;
	}

	return alignments;
}

/*
 * Use pmd mapping if supported as default alignment
 */
static unsigned long nd_pfn_default_alignment(void)
{

	if (has_transparent_hugepage())
		return HPAGE_PMD_SIZE;
	return PAGE_SIZE;
}

static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_size_select_store(dev, buf, &nd_pfn->align,
			nd_pfn_supported_alignments(aligns));
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);
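/*
 * Summary of the sysfs stores above: mode_store() rejects updates with
 * -EBUSY while the pfn device is bound to a driver, both stores are
 * serialized under device_lock() and nvdimm_bus_lock(), and mode_store()
 * accepts its keyword with or without a trailing newline (e.g. a plain
 * "echo pmem > mode" from userspace).
 */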
static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	if (nd_pfn->uuid)
		return sprintf(buf, "%pUb\n", nd_pfn->uuid);
	return sprintf(buf, "\n");
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t namespace_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	rc = sprintf(buf, "%s\n", nd_pfn->ndns
			? dev_name(&nd_pfn->ndns->dev) : "");
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t namespace_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(namespace);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
				+ start_pad + offset);
	} else {
		/* no address to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%llu\n", (unsigned long long)
				resource_size(&nsio->res) - start_pad
				- end_trunc - offset);
	} else {
		/* no size to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(size);

static ssize_t supported_alignments_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };

	return nd_size_select_show(0,
			nd_pfn_supported_alignments(aligns), buf);
}
static DEVICE_ATTR_RO(supported_alignments);
static struct attribute *nd_pfn_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_namespace.attr,
	&dev_attr_uuid.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_supported_alignments.attr,
	NULL,
};

static struct attribute_group nd_pfn_attribute_group = {
	.attrs = nd_pfn_attributes,
};

const struct attribute_group *nd_pfn_attribute_groups[] = {
	&nd_pfn_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

static const struct device_type nd_pfn_device_type = {
	.name = "nd_pfn",
	.release = nd_pfn_release,
	.groups = nd_pfn_attribute_groups,
};

bool is_nd_pfn(struct device *dev)
{
	return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);

static struct lock_class_key nvdimm_pfn_key;

struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns)
{
	struct device *dev;

	if (!nd_pfn)
		return NULL;

	nd_pfn->mode = PFN_MODE_NONE;
	nd_pfn->align = nd_pfn_default_alignment();
	dev = &nd_pfn->dev;
	device_initialize(&nd_pfn->dev);
	lockdep_set_class(&nd_pfn->dev.mutex, &nvdimm_pfn_key);
	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
		dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
				dev_name(ndns->claim));
		put_device(dev);
		return NULL;
	}
	return dev;
}

static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
	if (!nd_pfn)
		return NULL;

	nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
	if (nd_pfn->id < 0) {
		kfree(nd_pfn);
		return NULL;
	}

	dev = &nd_pfn->dev;
	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
	dev->type = &nd_pfn_device_type;
	dev->parent = &nd_region->dev;

	return nd_pfn;
}

struct device *nd_pfn_create(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	if (!is_memory(&nd_region->dev))
		return NULL;

	nd_pfn = nd_pfn_alloc(nd_region);
	dev = nd_pfn_devinit(nd_pfn, NULL);

	nd_device_register(dev);
	return dev;
}
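/*
 * nd_pfn_create() seeds an unconfigured "pfn%d.%d" device under a region
 * that passes the is_memory() check, leaving userspace to set the mode,
 * alignment, uuid and backing namespace via the attributes above.
 * nd_pfn_probe(), further down, instead instantiates a pfn device on top
 * of a namespace that already carries a valid info block.
 */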
/*
 * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
 * space associated with the namespace. If the memmap is set to DRAM, then
 * this is a no-op. Since the memmap area is freshly initialized during
 * probe, we have an opportunity to clear any badblocks in this area.
 */
static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
{
	struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	void *zero_page = page_address(ZERO_PAGE(0));
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	int num_bad, meta_num, rc, bb_present;
	sector_t first_bad, meta_start;
	struct nd_namespace_io *nsio;

	if (nd_pfn->mode != PFN_MODE_PMEM)
		return 0;

	nsio = to_nd_namespace_io(&ndns->dev);
	meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
	meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;

	/*
	 * re-enable the namespace with correct size so that we can access
	 * the device memmap area.
	 */
	devm_namespace_disable(&nd_pfn->dev, ndns);
	rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
	if (rc)
		return rc;

	do {
		unsigned long zero_len;
		u64 nsoff;

		bb_present = badblocks_check(&nd_region->bb, meta_start,
				meta_num, &first_bad, &num_bad);
		if (bb_present) {
			dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
					num_bad, first_bad);
			nsoff = ALIGN_DOWN((nd_region->ndr_start
					+ (first_bad << 9)) - nsio->res.start,
					PAGE_SIZE);
			zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
			while (zero_len) {
				unsigned long chunk = min(zero_len, PAGE_SIZE);

				rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
							chunk, 0);
				if (rc)
					break;

				zero_len -= chunk;
				nsoff += chunk;
			}
			if (rc) {
				dev_err(&nd_pfn->dev,
					"error clearing %x badblocks at %llx\n",
					num_bad, first_bad);
				return rc;
			}
		}
	} while (bb_present);

	return 0;
}

static bool nd_supported_alignment(unsigned long align)
{
	int i;
	unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };

	if (align == 0)
		return false;

	nd_pfn_supported_alignments(supported);
	for (i = 0; supported[i]; i++)
		if (align == supported[i])
			return true;
	return false;
}

/**
 * nd_pfn_validate - read and validate info-block
 * @nd_pfn: fsdax namespace runtime state / properties
 * @sig: 'devdax' or 'fsdax' signature
 *
 * Upon return the info-block buffer contents (->pfn_sb) are
 * indeterminate when validation fails, and a coherent info-block
 * otherwise.
 */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	u64 checksum, offset;
	struct resource *res;
	enum nd_pfn_mode mode;
	resource_size_t res_size;
	struct nd_namespace_io *nsio;
	unsigned long align, start_pad, end_trunc;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&ndns->dev);

	if (!pfn_sb || !ndns)
		return -ENODEV;

	if (!is_memory(nd_pfn->dev.parent))
		return -ENODEV;

	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
		return -ENXIO;

	if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
		return -ENODEV;

	checksum = le64_to_cpu(pfn_sb->checksum);
	pfn_sb->checksum = 0;
	if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
		return -ENODEV;
	pfn_sb->checksum = cpu_to_le64(checksum);

	if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
		return -ENODEV;

	if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
		pfn_sb->start_pad = 0;
		pfn_sb->end_trunc = 0;
	}

	if (__le16_to_cpu(pfn_sb->version_minor) < 2)
		pfn_sb->align = 0;

	if (__le16_to_cpu(pfn_sb->version_minor) < 4) {
		pfn_sb->page_struct_size = cpu_to_le16(64);
		pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
	}

	switch (le32_to_cpu(pfn_sb->mode)) {
	case PFN_MODE_RAM:
	case PFN_MODE_PMEM:
		break;
	default:
		return -ENXIO;
	}

	align = le32_to_cpu(pfn_sb->align);
	offset = le64_to_cpu(pfn_sb->dataoff);
	start_pad = le32_to_cpu(pfn_sb->start_pad);
	end_trunc = le32_to_cpu(pfn_sb->end_trunc);
	if (align == 0)
		align = 1UL << ilog2(offset);
	mode = le32_to_cpu(pfn_sb->mode);
	if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
			(mode == PFN_MODE_PMEM)) {
		dev_err(&nd_pfn->dev,
				"init failed, page size mismatch %d\n",
				le32_to_cpu(pfn_sb->page_size));
		return -EOPNOTSUPP;
	}

	if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
			(mode == PFN_MODE_PMEM)) {
		dev_err(&nd_pfn->dev,
				"init failed, struct page size mismatch %d\n",
				le16_to_cpu(pfn_sb->page_struct_size));
		return -EOPNOTSUPP;
	}

	/*
	 * Check whether we support the alignment. For Dax if the
	 * superblock alignment is not matching, we won't initialize
	 * the device.
	 */
	if (!nd_supported_alignment(align) &&
			!memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
		dev_err(&nd_pfn->dev, "init failed, alignment mismatch: "
				"%ld:%ld\n", nd_pfn->align, align);
		return -EOPNOTSUPP;
	}

	if (!nd_pfn->uuid) {
		/*
		 * When probing a namespace via nd_pfn_probe() the uuid
		 * is NULL (see: nd_pfn_devinit()), so initialize the
		 * settings from pfn_sb.
		 */
		nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
		if (!nd_pfn->uuid)
			return -ENOMEM;
		nd_pfn->align = align;
		nd_pfn->mode = mode;
	} else {
		/*
		 * When probing a pfn / dax instance we validate the
		 * live settings against the pfn_sb
		 */
		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
			return -ENODEV;

		/*
		 * If the uuid validates, but other settings mismatch,
		 * fail because userspace has managed to change the
		 * configuration without specifying new identification.
		 */
		if (nd_pfn->align != align || nd_pfn->mode != mode) {
			dev_err(&nd_pfn->dev,
					"init failed, settings mismatch\n");
			dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
					nd_pfn->align, align, nd_pfn->mode,
					mode);
			return -EOPNOTSUPP;
		}
	}

	if (align > nvdimm_namespace_capacity(ndns)) {
		dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
				align, nvdimm_namespace_capacity(ndns));
		return -EOPNOTSUPP;
	}
	/*
	 * These warnings are verbose because they can only trigger in
	 * the case where the physical address alignment of the
	 * namespace has changed since the pfn superblock was
	 * established.
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	res = &nsio->res;
	res_size = resource_size(res);
	if (offset >= res_size) {
		dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
				dev_name(&ndns->dev));
		return -EOPNOTSUPP;
	}

	if ((align && !IS_ALIGNED(res->start + offset + start_pad, align))
			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
		dev_err(&nd_pfn->dev,
				"bad offset: %#llx dax disabled align: %#lx\n",
				offset, align);
		return -EOPNOTSUPP;
	}

	if (!IS_ALIGNED(res->start + start_pad, memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "resource start misaligned\n");
		return -EOPNOTSUPP;
	}

	if (!IS_ALIGNED(res->end + 1 - end_trunc, memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "resource end misaligned\n");
		return -EOPNOTSUPP;
	}

	if (offset >= (res_size - start_pad - end_trunc)) {
		dev_err(&nd_pfn->dev, "bad offset with small namespace\n");
		return -EOPNOTSUPP;
	}
	return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
	int rc;
	struct nd_pfn *nd_pfn;
	struct device *pfn_dev;
	struct nd_pfn_sb *pfn_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
	case NVDIMM_CCLASS_PFN:
		break;
	default:
		return -ENODEV;
	}

	nvdimm_bus_lock(&ndns->dev);
	nd_pfn = nd_pfn_alloc(nd_region);
	pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!pfn_dev)
		return -ENOMEM;
	pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
	nd_pfn = to_nd_pfn(pfn_dev);
	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn, PFN_SIG);
	dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
	if (rc < 0) {
		nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
		put_device(pfn_dev);
	} else
		nd_device_register(pfn_dev);

	return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);
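/*
 * On-media layout, as established by nd_pfn_init() below and checked by
 * nd_pfn_validate() above: the info block is read and written at namespace
 * offset SZ_4K, the struct page array (in PFN_MODE_PMEM) is carved out of
 * the namespace directly after the info-block reserve, and user data begins
 * at the ->align aligned offset recorded in pfn_sb->dataoff. The helpers
 * below translate that layout into a dev_pagemap range plus vmem_altmap
 * reservation.
 */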
/*
 * We hotplug memory at sub-section granularity, pad the reserved area
 * from the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return SUBSECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}

static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	struct range *range = &pgmap->range;
	struct vmem_altmap *altmap = &pgmap->altmap;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	u64 offset = le64_to_cpu(pfn_sb->dataoff);
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	u32 reserve = nd_info_block_reserve();
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	resource_size_t end = nsio->res.end - end_trunc;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
		.end_pfn = PHYS_PFN(end),
	};

	*range = (struct range) {
		.start = nsio->res.start + start_pad,
		.end = nsio->res.end - end_trunc,
	};
	pgmap->nr_range = 1;
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset < reserve)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		memcpy(altmap, &__altmap, sizeof(*altmap));
		altmap->free = PHYS_PFN(offset - reserve);
		altmap->alloc = 0;
		pgmap->flags |= PGMAP_ALTMAP_VALID;
	} else
		return -ENXIO;

	return 0;
}

static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t start, size;
	struct nd_region *nd_region;
	unsigned long npfns, align;
	u32 end_trunc;
	struct nd_pfn_sb *pfn_sb;
	phys_addr_t offset;
	const char *sig;
	u64 checksum;
	int rc;

	pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	if (is_nd_dax(&nd_pfn->dev))
		sig = DAX_SIG;
	else
		sig = PFN_SIG;

	rc = nd_pfn_validate(nd_pfn, sig);
	if (rc == 0)
		return nd_pfn_clear_memmap_errors(nd_pfn);
	if (rc != -ENODEV)
		return rc;

	/* no info block, do init */
	memset(pfn_sb, 0, sizeof(*pfn_sb));

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		return -ENXIO;
	}

	start = nsio->res.start;
	size = resource_size(&nsio->res);
	npfns = PHYS_PFN(size - SZ_8K);
	align = max(nd_pfn->align, memremap_compat_align());

	/*
	 * When @start is misaligned fail namespace creation. See
	 * the 'struct nd_pfn_sb' commentary on why ->start_pad is not
	 * an option.
	 */
	if (!IS_ALIGNED(start, memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n",
				dev_name(&ndns->dev), &start,
				memremap_compat_align());
		return -EINVAL;
	}
	end_trunc = start + size - ALIGN_DOWN(start + size, align);
	if (nd_pfn->mode == PFN_MODE_PMEM) {
		unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;

		/*
		 * The altmap should be padded out to the block size used
		 * when populating the vmemmap. This *should* be equal to
		 * PMD_SIZE for most architectures.
		 *
		 * Also make sure size of struct page is less than
		 * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
		 * face of production kernel configurations that reduce the
		 * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
		 * kernel configurations that increase the 'struct page' size
		 * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
		 * for continuing with the capacity that will be wasted when
		 * reverting to a production kernel configuration. Otherwise,
		 * those configurations are blocked by default.
		 */
		if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
			if (page_struct_override)
				page_map_size = sizeof(struct page) * npfns;
			else {
				dev_err(&nd_pfn->dev,
					"Memory debug options prevent using pmem for the page map\n");
				return -EINVAL;
			}
		}
		offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
	} else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, align) - start;
	else
		return -ENXIO;

	if (offset >= (size - end_trunc)) {
		/* This results in zero size devices */
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		return -ENXIO;
	}

	npfns = PHYS_PFN(size - offset - end_trunc);
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(4);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	pfn_sb->align = cpu_to_le32(nd_pfn->align);
	if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
		pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
	else
		pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
	pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nd_pfn_clear_memmap_errors(nd_pfn);
	if (rc)
		return rc;

	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}

/*
 * Determine the effective resource range and vmem_altmap from an nd_pfn
 * instance.
 */
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	/* we need a valid pfn_sb before we can init a dev_pagemap */
	return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
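/*
 * nvdimm_setup_pfn() is the exported entry point: a caller (for example the
 * pmem driver) is expected to pass in the dev_pagemap it intends to hand to
 * devm_memremap_pages(), so that the struct page array is placed in the
 * altmap-reserved area described above.
 */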