/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < nvdimm->num_flush; i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd->flush_wpq[dimm][j] & PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd->flush_wpq[dimm][i] = flush_page
			+ (res->start & ~PAGE_MASK);
	}

	return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
	int i, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	return 0;
}

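/*
 * Region device release: drop the reference taken on each backing
 * nvdimm when the region was created, return the region id, and free
 * the blk- or pmem-specific region allocation.
 */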
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus
 * to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

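/*
 * Sum the available dimm-physical-address space across all of the
 * region's mappings.  A disabled dimm (no driver data) makes the whole
 * region report zero.  For PMEM, capacity consumed by BLK allocations
 * can overlap the interleave set, so whenever a larger overlap is seen
 * mid-scan the accounting restarts and the same (maximum) overlap is
 * applied to every mapping.
 */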
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

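/*
 * Attribute visibility: pfn_seed and dax_seed are pmem-region only;
 * set_cookie requires a pmem region backed by an interleave set; and
 * available_size is shown for regions that host PMEM/BLK namespaces
 * or, failing that, pmem regions with an interleave set.  All other
 * attributes are always visible.
 */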
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	WARN_ON(!mutex_is_locked(&nd_mapping->lock));
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
			&& probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

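/*
 * A BLK region needs at least one dimm mapping; access to the BLK data
 * window itself is set up by the provider's enable() callback.  Non-BLK
 * regions have nothing to initialize here.
 */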
int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

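/*
 * Illustrative usage of the lane interface above (not taken from a
 * specific caller): a driver brackets its access to a shared BLK data
 * window or BTT log slot with an acquire/release pair, which also pins
 * the task to a cpu via get_cpu()/put_cpu() for the duration:
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	... program the data window / update BTT metadata ...
 *
 *	nd_region_release_lane(nd_region, lane);
 */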
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via arch_memcpy_to_pmem().  The
	 * final wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd->flush_wpq[i][0])
			writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++)
		/* flush hints present, flushing required */
		if (ndrd->flush_wpq[i][0])
			return 1;

	/*
	 * The platform defines dimm devices without hints, assume a
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}