/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}
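
/*
 * Illustrative example of the page-sharing logic above (not part of
 * the driver): with two flush hints at physical 0x1000 and 0x1040,
 * both addresses resolve to the same pfn, so hint 1 reuses hint 0's
 * mapping and only the in-page offset differs:
 *
 *	entry 0 = flush_page + 0x000
 *	entry 1 = flush_page + 0x040	(no second ioremap needed)
 */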
int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates. This should prevent the
	 * extra flushes.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}
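
/*
 * Sizing note (illustrative): hints_shift is derived from the smallest
 * non-zero num_flush across the dimms, so every dimm's hint table can
 * be indexed with the same power-of-two stride (see ndrd_get_flush_wpq()
 * in nd.h), while each dimm also keeps one NULL slot for the "no-hint"
 * case.
 */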
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and bit number for a nvdimm_bus to match
 * namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
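
/*
 * For reference, the mapping implemented above:
 *
 *	pmem region with any NDD_ALIASING dimm	-> ND_DEVICE_NAMESPACE_PMEM
 *	pmem region with no aliased dimms	-> ND_DEVICE_NAMESPACE_IO
 *	blk region				-> ND_DEVICE_NAMESPACE_BLK
 */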
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}
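
/*
 * Note on the retry loop above: a new blk_max_overlap invalidates the
 * pmem capacity already accumulated for earlier mappings, since a
 * larger BLK overlap shrinks what pmem may claim on every dimm in the
 * set, so the scan restarts from mapping 0 whenever the maximum grows.
 */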
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
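
/*
 * Rough summary of region_visible() above: pfn_seed and dax_seed only
 * apply to pmem regions, available_size is only shown once the region
 * maps to a pmem or blk namespace type, set_cookie additionally
 * requires a pmem region with an interleave set, and every other
 * attribute keeps its default mode.
 */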
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds. Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
			&& probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}
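
/*
 * Seed lifecycle sketch (illustrative): when the device currently
 * acting as a seed (ns_seed, btt_seed, pfn_seed, or dax_seed) probes
 * successfully, the corresponding nd_region_create_*_seed() call above
 * plants a fresh, unconfigured device so userspace always has an empty
 * seed to configure next.
 */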
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx)					\
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
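
/*
 * Typical usage of the lane interface (illustrative sketch, not a
 * kernel API contract):
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... submit I/O through this lane's BLK data window or BTT log ...
 *	nd_region_release_lane(nd_region, lane);
 *
 * With, say, 4 lanes on an 8-cpu system, cpus 0 and 4 both map to lane
 * 0 and serialize on that lane's spinlock; once num_lanes >= nr_cpu_ids
 * the lock is never taken and the lane is simply the cpu number.
 */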
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}
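
/*
 * Example of the mapping validation above (illustrative): the single
 * test (start | size) % SZ_4K catches misalignment in either field,
 * e.g. start = 0x1200 fails, size = 0x1800 fails, while start = 0x1000
 * with size = 0x2000 passes.
 */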
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush. Note that we've already arranged for pmem
	 * writes to avoid the cache via arch_memcpy_to_pmem().  The
	 * final wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
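
/*
 * Worked example of the hint selection above (assuming, per nd.h, that
 * ndrd_get_flush_wpq() masks the hint index down to the table size):
 * with 8 hints per dimm only the low 3 bits of idx matter, so the
 * per-cpu hash_32() increment walks each cpu through a pseudo-random
 * sequence of the 8 hint addresses, spreading flush traffic across the
 * write-pending-queue flush registers.
 */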
/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++)
		/* flush hints present, flushing required */
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			return 1;

	/*
	 * The platform defines dimm devices without hints, assume a
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}