/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/* allocator for unique "region%d" device ids */
static DEFINE_IDA(region_ida);

/*
 * Device-model release callback shared by all region device types.
 * Drops the per-mapping nvdimm references taken in nd_region_create(),
 * frees the per-cpu lane data and the region id, and finally frees the
 * containing allocation -- which is the larger struct nd_blk_region for
 * BLK regions.
 */
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* paired with get_device() in nd_region_create() */
		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	/* BLK regions embed nd_region inside struct nd_blk_region */
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

/* type test: is @dev a persistent memory region device? */
bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

/* type test: is @dev a block-aperture region device? */
bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	/* catch casts of devices that are not region devices */
	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

/* opaque bus-provider context registered via nd_region_desc */
void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

/* opaque BLK-provider context set by nd_blk_region_set_provider_data() */
void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and bit number for a nvdimm_bus to match
 * namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		/* count mappings backed by label-aliased (NDD_ALIASING) dimms */
		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		/* any aliasing dimm makes this a label-defined pmem namespace */
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

/*
 * Per-namespace helper for nd_is_uuid_unique(): return -EBUSY when the
 * candidate uuid (@data) collides with the uuid of namespace @dev.
 */
static int is_uuid_busy(struct device *dev, void *data)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = data;

	switch (nd_region_to_nstype(nd_region)) {
	case ND_DEVICE_NAMESPACE_PMEM: {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		if (!nspm->uuid)
			break;
		if (memcmp(uuid, nspm->uuid, NSLABEL_UUID_LEN) == 0)
			return -EBUSY;
		break;
	}
	case ND_DEVICE_NAMESPACE_BLK: {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		if (!nsblk->uuid)
			break;
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) == 0)
			return -EBUSY;
		break;
	}
	default:
		break;
	}

	return 0;
}

/* descend into each region's child namespaces looking for a collision */
static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_pmem(dev) || is_nd_blk(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 *
 * Must be called with the nvdimm_bus lock held (WARNs otherwise).
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		/* BLK case: a single mapping defines the region size */
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

/*
 * Sum the available dimm-physical-address capacity over all mappings of
 * @nd_region.  For PMEM the per-mapping accounting depends on how much
 * BLK capacity overlaps (aliases) the PMEM range; if a mapping reports a
 * larger overlap than the current assumption, restart the accounting
 * with the new maximum.  Requires the nvdimm_bus lock (WARNs otherwise).
 */
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev)) {
			available += nd_blk_available_dpa(nd_mapping);
		}
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * memory nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	/* drvdata is populated by the region driver; absent until probe */
	struct nd_region_namespaces *num_ns = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (num_ns)
		rc = sprintf(buf, "%d/%d\n", num_ns->active, num_ns->count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t
btt_seed_show(struct device *dev, 332 struct device_attribute *attr, char *buf) 333 { 334 struct nd_region *nd_region = to_nd_region(dev); 335 ssize_t rc; 336 337 nvdimm_bus_lock(dev); 338 if (nd_region->btt_seed) 339 rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed)); 340 else 341 rc = sprintf(buf, "\n"); 342 nvdimm_bus_unlock(dev); 343 344 return rc; 345 } 346 static DEVICE_ATTR_RO(btt_seed); 347 348 static ssize_t pfn_seed_show(struct device *dev, 349 struct device_attribute *attr, char *buf) 350 { 351 struct nd_region *nd_region = to_nd_region(dev); 352 ssize_t rc; 353 354 nvdimm_bus_lock(dev); 355 if (nd_region->pfn_seed) 356 rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed)); 357 else 358 rc = sprintf(buf, "\n"); 359 nvdimm_bus_unlock(dev); 360 361 return rc; 362 } 363 static DEVICE_ATTR_RO(pfn_seed); 364 365 static ssize_t read_only_show(struct device *dev, 366 struct device_attribute *attr, char *buf) 367 { 368 struct nd_region *nd_region = to_nd_region(dev); 369 370 return sprintf(buf, "%d\n", nd_region->ro); 371 } 372 373 static ssize_t read_only_store(struct device *dev, 374 struct device_attribute *attr, const char *buf, size_t len) 375 { 376 bool ro; 377 int rc = strtobool(buf, &ro); 378 struct nd_region *nd_region = to_nd_region(dev); 379 380 if (rc) 381 return rc; 382 383 nd_region->ro = ro; 384 return len; 385 } 386 static DEVICE_ATTR_RW(read_only); 387 388 static struct attribute *nd_region_attributes[] = { 389 &dev_attr_size.attr, 390 &dev_attr_nstype.attr, 391 &dev_attr_mappings.attr, 392 &dev_attr_btt_seed.attr, 393 &dev_attr_pfn_seed.attr, 394 &dev_attr_read_only.attr, 395 &dev_attr_set_cookie.attr, 396 &dev_attr_available_size.attr, 397 &dev_attr_namespace_seed.attr, 398 &dev_attr_init_namespaces.attr, 399 NULL, 400 }; 401 402 static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) 403 { 404 struct device *dev = container_of(kobj, typeof(*dev), kobj); 405 struct nd_region *nd_region = to_nd_region(dev); 406 
struct nd_interleave_set *nd_set = nd_region->nd_set; 407 int type = nd_region_to_nstype(nd_region); 408 409 if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr) 410 return 0; 411 412 if (a != &dev_attr_set_cookie.attr 413 && a != &dev_attr_available_size.attr) 414 return a->mode; 415 416 if ((type == ND_DEVICE_NAMESPACE_PMEM 417 || type == ND_DEVICE_NAMESPACE_BLK) 418 && a == &dev_attr_available_size.attr) 419 return a->mode; 420 else if (is_nd_pmem(dev) && nd_set) 421 return a->mode; 422 423 return 0; 424 } 425 426 struct attribute_group nd_region_attribute_group = { 427 .attrs = nd_region_attributes, 428 .is_visible = region_visible, 429 }; 430 EXPORT_SYMBOL_GPL(nd_region_attribute_group); 431 432 u64 nd_region_interleave_set_cookie(struct nd_region *nd_region) 433 { 434 struct nd_interleave_set *nd_set = nd_region->nd_set; 435 436 if (nd_set) 437 return nd_set->cookie; 438 return 0; 439 } 440 441 /* 442 * Upon successful probe/remove, take/release a reference on the 443 * associated interleave set (if present), and plant new btt + namespace 444 * seeds. Also, on the removal of a BLK region, notify the provider to 445 * disable the region. 
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			/* drop label data and the ndd reference taken at probe */
			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			/* only decrement busy if the dimm was actually held */
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;

		/* BLK regions additionally notify the provider on removal */
		to_nd_blk_region(dev)->disable(nvdimm_bus, dev);
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		/* the seed namespace probed; plant a replacement seed */
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		/* a btt claiming the seed namespace also consumes it */
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

/* format mapping @n as "<dimm-name>,<start>,<size>" for sysfs */
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

/* stamp out a mapping<idx> read-only attribute backed by mappingN() */
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

/* only expose as many mapping%d attributes as the region has mappings */
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

/* invoke the provider's enable() hook when a BLK region is set up */
int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	/* disable preemption; balanced in nd_region_release_lane() */
	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		/* multiple cpus share a lane; serialize on the lane's lock */
		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		/* per-cpu recursion count: only the outermost takes the lock */
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		/* unlock only when the outermost recursive hold is released */
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();	/* balances the get_cpu() above */
	}
	/* balances the get_cpu() in nd_region_acquire_lane() */
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

/*
 * Allocate, initialize, and register a region device of @dev_type.
 * Takes a reference on each mapping's nvdimm (dropped in
 * nd_region_release()).  Returns NULL on allocation or validation
 * failure.
 */
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* reject mappings whose start or size is not 4K aligned */
		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		/* an unarmed dimm forces the whole region read-only */
		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		/* nd_region (with its mapping[] tail) embedded in ndbr */
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->disable = ndbr_desc->disable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* paired with put_device() in nd_region_release() */
		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	/* BLK regions are limited to a single dimm mapping */
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);