/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
                struct nd_region_data *ndrd)
{
        int i, j;

        dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
                        nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
        for (i = 0; i < (1 << ndrd->hints_shift); i++) {
                struct resource *res = &nvdimm->flush_wpq[i];
                unsigned long pfn = PHYS_PFN(res->start);
                void __iomem *flush_page;

                /* check if flush hints share a page */
                for (j = 0; j < i; j++) {
                        struct resource *res_j = &nvdimm->flush_wpq[j];
                        unsigned long pfn_j = PHYS_PFN(res_j->start);

                        if (pfn == pfn_j)
                                break;
                }

                if (j < i)
                        flush_page = (void __iomem *) ((unsigned long)
                                        ndrd_get_flush_wpq(ndrd, dimm, j)
                                        & PAGE_MASK);
                else
                        flush_page = devm_nvdimm_ioremap(dev,
                                        PFN_PHYS(pfn), PAGE_SIZE);
                if (!flush_page)
                        return -ENXIO;
                ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
                                + (res->start & ~PAGE_MASK));
        }

        return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
        int i, j, num_flush = 0;
        struct nd_region_data *ndrd;
        struct device *dev = &nd_region->dev;
        size_t flush_data_size = sizeof(void *);

        nvdimm_bus_lock(&nd_region->dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                /* at least one null hint slot per-dimm for the "no-hint" case */
                flush_data_size += sizeof(void *);
                num_flush = min_not_zero(num_flush, nvdimm->num_flush);
                if (!nvdimm->num_flush)
                        continue;
                flush_data_size += nvdimm->num_flush * sizeof(void *);
        }
        nvdimm_bus_unlock(&nd_region->dev);

        ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
        if (!ndrd)
                return -ENOMEM;
        dev_set_drvdata(dev, ndrd);

        if (!num_flush)
                return 0;

        ndrd->hints_shift = ilog2(num_flush);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
                int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

                if (rc)
                        return rc;
        }

        /*
         * Clear out entries that are duplicates so that we avoid
         * redundant flushes to the same hint page.
         */
        for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
                /* ignore if NULL already */
                if (!ndrd_get_flush_wpq(ndrd, i, 0))
                        continue;

                for (j = i + 1; j < nd_region->ndr_mappings; j++)
                        if (ndrd_get_flush_wpq(ndrd, i, 0) ==
                            ndrd_get_flush_wpq(ndrd, j, 0))
                                ndrd_set_flush_wpq(ndrd, j, 0, NULL);
        }

        return 0;
}
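
/*
 * Illustrative sketch only: ndrd_get_flush_wpq()/ndrd_set_flush_wpq()
 * are defined in nd-core.h, but given the table built above
 * (hints_shift == ilog2(num_flush), plus at least one slot per dimm)
 * the lookup is presumably of the form:
 *
 *	void __iomem *get_flush_wpq(struct nd_region_data *ndrd,
 *			int dimm, int hint)
 *	{
 *		unsigned int num = 1 << ndrd->hints_shift;
 *
 *		return ndrd->flush_wpq[dimm * num + (hint % num)];
 *	}
 *
 * i.e. every dimm owns a contiguous run of hint slots and an arbitrary
 * hint index is wrapped into that dimm's run.
 */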

static void nd_region_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);
        u16 i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                put_device(&nvdimm->dev);
        }
        free_percpu(nd_region->lane);
        ida_simple_remove(&region_ida, nd_region->id);
        if (is_nd_blk(dev))
                kfree(to_nd_blk_region(dev));
        else
                kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
        .name = "nd_blk",
        .release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
        .name = "nd_pmem",
        .release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
        .name = "nd_volatile",
        .release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
        return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
        return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
        return dev ? dev->type == &nd_volatile_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
        struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

        WARN_ON(dev->type->release != nd_region_release);
        return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
        if (!nd_region)
                return NULL;
        return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);

        WARN_ON(!is_nd_blk(dev));
        return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
        return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
        return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
        ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is also the 'nstype' attribute of a region, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
        if (is_memory(&nd_region->dev)) {
                u16 i, alias;

                for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        if (test_bit(NDD_ALIASING, &nvdimm->flags))
                                alias++;
                }
                if (alias)
                        return ND_DEVICE_NAMESPACE_PMEM;
                else
                        return ND_DEVICE_NAMESPACE_IO;
        } else if (is_nd_blk(&nd_region->dev)) {
                return ND_DEVICE_NAMESPACE_BLK;
        }

        return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long size = 0;

        if (is_memory(dev)) {
                size = nd_region->ndr_size;
        } else if (nd_region->ndr_mappings == 1) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];

                size = nd_mapping->size;
        }

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        /*
         * NOTE: in the nvdimm_has_flush() error case this attribute is
         * not visible.
         */
        return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t len)
{
        bool flush;
        int rc = strtobool(buf, &flush);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;
        if (!flush)
                return -EINVAL;
        nvdimm_flush(nd_region);

        return len;
}
static DEVICE_ATTR_RW(deep_flush);
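
/*
 * Example usage (illustrative, device names are placeholders): reading
 * the attribute reports the nvdimm_has_flush() result, and writing "1"
 * triggers an explicit flush:
 *
 *	# cat /sys/bus/nd/devices/region0/deep_flush
 *	1
 *	# echo 1 > /sys/bus/nd/devices/region0/deep_flush
 */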

static ssize_t mappings_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        ssize_t rc = 0;

        if (is_memory(dev) && nd_set)
                /* pass, should be precluded by region_visible */;
        else
                return -ENXIO;

        /*
         * The cookie to show depends on which specification of the
         * labels we are using. If there are no labels then default to
         * the v1.1 namespace label cookie definition. To read all this
         * data we need to wait for probing to settle.
         */
        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        if (nd_region->ndr_mappings) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                if (ndd) {
                        struct nd_namespace_index *nsindex;

                        nsindex = to_namespace_index(ndd, ndd->ns_current);
                        rc = sprintf(buf, "%#llx\n",
                                        nd_region_interleave_set_cookie(nd_region,
                                                        nsindex));
                }
        }
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        if (rc)
                return rc;
        return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
        resource_size_t blk_max_overlap = 0, available, overlap;
        int i;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
        available = 0;
        overlap = blk_max_overlap;
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                /* if a dimm is disabled the available capacity is zero */
                if (!ndd)
                        return 0;

                if (is_memory(&nd_region->dev)) {
                        available += nd_pmem_available_dpa(nd_region,
                                        nd_mapping, &overlap);
                        if (overlap > blk_max_overlap) {
                                blk_max_overlap = overlap;
                                goto retry;
                        }
                } else if (is_nd_blk(&nd_region->dev))
                        available += nd_blk_available_dpa(nd_region);
        }

        return available;
}

static ssize_t available_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        /*
         * Flush in-flight updates and grab a snapshot of the available
         * size. Of course, this value is potentially invalidated the
         * moment nvdimm_bus_lock() is dropped, but that's userspace's
         * problem to not race itself.
         */
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_available_dpa(nd_region);
        nvdimm_bus_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region_data *ndrd = dev_get_drvdata(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (ndrd)
                rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
        else
                rc = -ENXIO;
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->ns_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->btt_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->pfn_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->dax_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool ro;
        int rc = strtobool(buf, &ro);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;

        nd_region->ro = ro;
        return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return badblocks_show(&nd_region->bb, buf, 0);
}

static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long flags = nd_region->flags;

        return sprintf(buf, "%s%s\n",
                        flags & BIT(ND_REGION_PERSIST_CACHE) ? "cpu_cache " : "",
                        flags & BIT(ND_REGION_PERSIST_MEMCTRL) ? "memory_controller " : "");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
        &dev_attr_size.attr,
        &dev_attr_nstype.attr,
        &dev_attr_mappings.attr,
        &dev_attr_btt_seed.attr,
        &dev_attr_pfn_seed.attr,
        &dev_attr_dax_seed.attr,
        &dev_attr_deep_flush.attr,
        &dev_attr_read_only.attr,
        &dev_attr_set_cookie.attr,
        &dev_attr_available_size.attr,
        &dev_attr_namespace_seed.attr,
        &dev_attr_init_namespaces.attr,
        &dev_attr_badblocks.attr,
        &dev_attr_resource.attr,
        &dev_attr_persistence_domain.attr,
        NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        int type = nd_region_to_nstype(nd_region);

        if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
                return 0;

        if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
                return 0;

        if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
                return 0;

        if (a == &dev_attr_resource.attr) {
                if (is_nd_pmem(dev))
                        return 0400;
                else
                        return 0;
        }

        if (a == &dev_attr_deep_flush.attr) {
                int has_flush = nvdimm_has_flush(nd_region);

                if (has_flush == 1)
                        return a->mode;
                else if (has_flush == 0)
                        return 0444;
                else
                        return 0;
        }

        if (a != &dev_attr_set_cookie.attr
                        && a != &dev_attr_available_size.attr)
                return a->mode;

        if ((type == ND_DEVICE_NAMESPACE_PMEM
                                || type == ND_DEVICE_NAMESPACE_BLK)
                        && a == &dev_attr_available_size.attr)
                return a->mode;
        else if (is_memory(dev) && nd_set)
                return a->mode;

        return 0;
}

struct attribute_group nd_region_attribute_group = {
        .attrs = nd_region_attributes,
        .is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
                struct nd_namespace_index *nsindex)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (!nd_set)
                return 0;

        if (nsindex && __le16_to_cpu(nsindex->major) == 1
                        && __le16_to_cpu(nsindex->minor) == 1)
                return nd_set->cookie1;
        return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (nd_set)
                return nd_set->altcookie;
        return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
        struct nd_label_ent *label_ent, *e;

        lockdep_assert_held(&nd_mapping->lock);
        list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
                list_del(&label_ent->list);
                kfree(label_ent);
        }
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
                struct device *dev, bool probe)
{
        struct nd_region *nd_region;

        if (!probe && is_nd_region(dev)) {
                int i;

                nd_region = to_nd_region(dev);
                for (i = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm_drvdata *ndd = nd_mapping->ndd;
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        mutex_lock(&nd_mapping->lock);
                        nd_mapping_free_labels(nd_mapping);
                        mutex_unlock(&nd_mapping->lock);

                        put_ndd(ndd);
                        nd_mapping->ndd = NULL;
                        if (ndd)
                                atomic_dec(&nvdimm->busy);
                }
        }
        if (dev->parent && is_nd_region(dev->parent) && probe) {
                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->ns_seed == dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_btt(dev) && probe) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->btt_seed == dev)
                        nd_region_create_btt_seed(nd_region);
                if (nd_region->ns_seed == &nd_btt->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_pfn(dev) && probe) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->pfn_seed == dev)
                        nd_region_create_pfn_seed(nd_region);
                if (nd_region->ns_seed == &nd_pfn->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_dax(dev) && probe) {
                struct nd_dax *nd_dax = to_nd_dax(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->dax_seed == dev)
                        nd_region_create_dax_seed(nd_region);
                if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm *nvdimm;

        if (n >= nd_region->ndr_mappings)
                return -ENXIO;
        nd_mapping = &nd_region->mapping[n];
        nvdimm = nd_mapping->nvdimm;

        return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
                        nd_mapping->start, nd_mapping->size,
                        nd_mapping->position);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
                struct device_attribute *attr, char *buf)	\
{								\
        return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)
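
/*
 * For reference, REGION_MAPPING(0) expands to:
 *
 *	static ssize_t mapping0_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		return mappingN(dev, buf, 0);
 *	}
 *	static DEVICE_ATTR_RO(mapping0);
 *
 * i.e. one read-only "mappingN" sysfs attribute per possible index.
 */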

/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nd_region *nd_region = to_nd_region(dev);

        if (n < nd_region->ndr_mappings)
                return a->mode;
        return 0;
}

static struct attribute *mapping_attributes[] = {
        &dev_attr_mapping0.attr,
        &dev_attr_mapping1.attr,
        &dev_attr_mapping2.attr,
        &dev_attr_mapping3.attr,
        &dev_attr_mapping4.attr,
        &dev_attr_mapping5.attr,
        &dev_attr_mapping6.attr,
        &dev_attr_mapping7.attr,
        &dev_attr_mapping8.attr,
        &dev_attr_mapping9.attr,
        &dev_attr_mapping10.attr,
        &dev_attr_mapping11.attr,
        &dev_attr_mapping12.attr,
        &dev_attr_mapping13.attr,
        &dev_attr_mapping14.attr,
        &dev_attr_mapping15.attr,
        &dev_attr_mapping16.attr,
        &dev_attr_mapping17.attr,
        &dev_attr_mapping18.attr,
        &dev_attr_mapping19.attr,
        &dev_attr_mapping20.attr,
        &dev_attr_mapping21.attr,
        &dev_attr_mapping22.attr,
        &dev_attr_mapping23.attr,
        &dev_attr_mapping24.attr,
        &dev_attr_mapping25.attr,
        &dev_attr_mapping26.attr,
        &dev_attr_mapping27.attr,
        &dev_attr_mapping28.attr,
        &dev_attr_mapping29.attr,
        &dev_attr_mapping30.attr,
        &dev_attr_mapping31.attr,
        NULL,
};

struct attribute_group nd_mapping_attribute_group = {
        .is_visible = mapping_visible,
        .attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
        struct device *dev = &nd_region->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!is_nd_blk(dev))
                return 0;

        if (nd_region->ndr_mappings < 1) {
                dev_dbg(dev, "invalid BLK region\n");
                return -ENXIO;
        }

        return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
        unsigned int cpu, lane;

        cpu = get_cpu();
        if (nd_region->num_lanes < nr_cpu_ids) {
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                lane = cpu % nd_region->num_lanes;
                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (ndl_count->count++ == 0)
                        spin_lock(&ndl_lock->lock);
        } else
                lane = cpu;

        return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
        if (nd_region->num_lanes < nr_cpu_ids) {
                unsigned int cpu = get_cpu();
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (--ndl_count->count == 0)
                        spin_unlock(&ndl_lock->lock);
                put_cpu();
        }
        put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
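
/*
 * Illustrative sketch (not from this file): a BLK-mode I/O path would
 * bracket each request with a lane, e.g.:
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	... issue I/O through the lane's data window ...
 *
 *	nd_region_release_lane(nd_region, lane);
 *
 * Note that preemption is disabled (via get_cpu()) between acquire and
 * release, so the bracketed section must not sleep.
 */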

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc, struct device_type *dev_type,
                const char *caller)
{
        struct nd_region *nd_region;
        struct device *dev;
        void *region_buf;
        unsigned int i;
        int ro = 0;

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                if ((mapping->start | mapping->size) % SZ_4K) {
                        dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
                                        caller, dev_name(&nvdimm->dev), i);

                        return NULL;
                }

                if (test_bit(NDD_UNARMED, &nvdimm->flags))
                        ro = 1;
        }

        if (dev_type == &nd_blk_device_type) {
                struct nd_blk_region_desc *ndbr_desc;
                struct nd_blk_region *ndbr;

                ndbr_desc = to_blk_region_desc(ndr_desc);
                ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
                                * ndr_desc->num_mappings,
                                GFP_KERNEL);
                if (ndbr) {
                        nd_region = &ndbr->nd_region;
                        ndbr->enable = ndbr_desc->enable;
                        ndbr->do_io = ndbr_desc->do_io;
                }
                region_buf = ndbr;
        } else {
                nd_region = kzalloc(sizeof(struct nd_region)
                                + sizeof(struct nd_mapping)
                                * ndr_desc->num_mappings,
                                GFP_KERNEL);
                region_buf = nd_region;
        }

        if (!region_buf)
                return NULL;
        nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
        if (nd_region->id < 0)
                goto err_id;

        nd_region->lane = alloc_percpu(struct nd_percpu_lane);
        if (!nd_region->lane)
                goto err_percpu;

        for (i = 0; i < nr_cpu_ids; i++) {
                struct nd_percpu_lane *ndl;

                ndl = per_cpu_ptr(nd_region->lane, i);
                spin_lock_init(&ndl->lock);
                ndl->count = 0;
        }

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                nd_region->mapping[i].nvdimm = nvdimm;
                nd_region->mapping[i].start = mapping->start;
                nd_region->mapping[i].size = mapping->size;
                nd_region->mapping[i].position = mapping->position;
                INIT_LIST_HEAD(&nd_region->mapping[i].labels);
                mutex_init(&nd_region->mapping[i].lock);

                get_device(&nvdimm->dev);
        }
        nd_region->ndr_mappings = ndr_desc->num_mappings;
        nd_region->provider_data = ndr_desc->provider_data;
        nd_region->nd_set = ndr_desc->nd_set;
        nd_region->num_lanes = ndr_desc->num_lanes;
        nd_region->flags = ndr_desc->flags;
        nd_region->ro = ro;
        nd_region->numa_node = ndr_desc->numa_node;
        ida_init(&nd_region->ns_ida);
        ida_init(&nd_region->btt_ida);
        ida_init(&nd_region->pfn_ida);
        ida_init(&nd_region->dax_ida);
        dev = &nd_region->dev;
        dev_set_name(dev, "region%d", nd_region->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = dev_type;
        dev->groups = ndr_desc->attr_groups;
        dev->of_node = ndr_desc->of_node;
        nd_region->ndr_size = resource_size(ndr_desc->res);
        nd_region->ndr_start = ndr_desc->res->start;
        nd_device_register(dev);

        return nd_region;

 err_percpu:
        ida_simple_remove(&region_ida, nd_region->id);
 err_id:
        kfree(region_buf);
        return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        if (ndr_desc->num_mappings > 1)
                return NULL;
        ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
        struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
        int i, idx;

        /*
         * Try to encourage some diversity in flush hint addresses
         * across cpus assuming a limited number of flush hints.
         */
        idx = this_cpu_read(flush_idx);
        idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

        /*
         * The first wmb() is needed to 'sfence' all previous writes
         * such that they are architecturally visible for the platform
         * buffer flush.  Note that we've already arranged for pmem
         * writes to avoid the cache via memcpy_flushcache().  The final
         * wmb() ensures ordering for the NVDIMM flush write.
         */
        wmb();
        for (i = 0; i < nd_region->ndr_mappings; i++)
                if (ndrd_get_flush_wpq(ndrd, i, 0))
                        writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
        wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
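
/*
 * Illustrative sketch (not from this file): a pmem consumer typically
 * pairs cache-bypassing writes with a flush of the write-pending
 * queues, e.g.:
 *
 *	memcpy_flushcache(pmem_addr, data, len);
 *	nvdimm_flush(nd_region);
 *
 * nvdimm_flush() degrades to just the wmb() barriers when no flush
 * hints are mapped, so callers need not special-case platforms where
 * nvdimm_has_flush() (below) returns 0.
 */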

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
        int i;

        /* no nvdimm or pmem api == flushing capability unknown */
        if (nd_region->ndr_mappings == 0
                        || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
                return -ENXIO;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                /* flush hints present / available */
                if (nvdimm->num_flush)
                        return 1;
        }

        /*
         * The platform defines dimm devices without hints; assume a
         * platform persistence mechanism like ADR.
         */
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

int nvdimm_has_cache(struct nd_region *nd_region)
{
        return is_nd_pmem(&nd_region->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

void __exit nd_region_devs_exit(void)
{
        ida_destroy(&region_ida);
}