/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fcntl.h>
#include <linux/async.h>
#include <linux/genhd.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
#include "pfn.h"

int nvdimm_major;
static int nvdimm_bus_major;
static struct class *nd_class;
static DEFINE_IDA(nd_ida);

static int to_nd_device_type(struct device *dev)
{
	if (is_nvdimm(dev))
		return ND_DEVICE_DIMM;
	else if (is_nd_pmem(dev))
		return ND_DEVICE_REGION_PMEM;
	else if (is_nd_blk(dev))
		return ND_DEVICE_REGION_BLK;
	else if (is_nd_dax(dev))
		return ND_DEVICE_DAX_PMEM;
	else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent))
		return nd_region_to_nstype(to_nd_region(dev->parent));

	return 0;
}

static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	/*
	 * Ensure that region devices always have their numa node set as
	 * early as possible.
	 */
	if (is_nd_pmem(dev) || is_nd_blk(dev))
		set_dev_node(dev, to_nd_region(dev)->numa_node);
	return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
			to_nd_device_type(dev));
}

static struct module *to_bus_provider(struct device *dev)
{
	/* pin bus providers while regions are enabled */
	if (is_nd_pmem(dev) || is_nd_blk(dev)) {
		struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

		return nvdimm_bus->nd_desc->module;
	}
	return NULL;
}

static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
{
	nvdimm_bus_lock(&nvdimm_bus->dev);
	nvdimm_bus->probe_active++;
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
	nvdimm_bus_lock(&nvdimm_bus->dev);
	if (--nvdimm_bus->probe_active == 0)
		wake_up(&nvdimm_bus->probe_wait);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

static int nvdimm_bus_probe(struct device *dev)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
	struct module *provider = to_bus_provider(dev);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	int rc;

	if (!try_module_get(provider))
		return -ENXIO;

	nvdimm_bus_probe_start(nvdimm_bus);
	rc = nd_drv->probe(dev);
	if (rc == 0)
		nd_region_probe_success(nvdimm_bus, dev);
	else
		nd_region_disable(nvdimm_bus, dev);
	nvdimm_bus_probe_end(nvdimm_bus);

	dev_dbg(&nvdimm_bus->dev, "%s.probe(%s) = %d\n", dev->driver->name,
			dev_name(dev), rc);

	if (rc != 0)
		module_put(provider);
	return rc;
}
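/*
 * nvdimm_bus_remove() below is the teardown counterpart of
 * nvdimm_bus_probe() above: on the success path it drops the bus-provider
 * module reference taken via to_bus_provider()/try_module_get() (probe
 * already drops it itself when ->probe() fails).
 */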
static int nvdimm_bus_remove(struct device *dev)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
	struct module *provider = to_bus_provider(dev);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	int rc = 0;

	if (nd_drv->remove)
		rc = nd_drv->remove(dev);
	nd_region_disable(nvdimm_bus, dev);

	dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
			dev_name(dev), rc);
	module_put(provider);
	return rc;
}

static void nvdimm_bus_shutdown(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nd_device_driver *nd_drv = NULL;

	if (dev->driver)
		nd_drv = to_nd_device_driver(dev->driver);

	if (nd_drv && nd_drv->shutdown) {
		nd_drv->shutdown(dev);
		dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n",
				dev->driver->name, dev_name(dev));
	}
}

void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
	device_lock(dev);
	if (dev->driver) {
		struct nd_device_driver *nd_drv;

		nd_drv = to_nd_device_driver(dev->driver);
		if (nd_drv->notify)
			nd_drv->notify(dev, event);
	}
	device_unlock(dev);
}
EXPORT_SYMBOL(nd_device_notify);

void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

	if (!nvdimm_bus)
		return;

	/* caller is responsible for holding a reference on the device */
	nd_device_notify(&nd_region->dev, event);
}
EXPORT_SYMBOL_GPL(nvdimm_region_notify);

struct clear_badblocks_context {
	resource_size_t phys, cleared;
};

static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
{
	struct clear_badblocks_context *ctx = data;
	struct nd_region *nd_region;
	resource_size_t ndr_end;
	sector_t sector;

	/* make sure device is a region */
	if (!is_nd_pmem(dev))
		return 0;

	nd_region = to_nd_region(dev);
	ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;

	/* make sure we are in the region */
	if (ctx->phys < nd_region->ndr_start
			|| (ctx->phys + ctx->cleared) > ndr_end)
		return 0;

	sector = (ctx->phys - nd_region->ndr_start) / 512;
	badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512);

	return 0;
}

static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
		phys_addr_t phys, u64 cleared)
{
	struct clear_badblocks_context ctx = {
		.phys = phys,
		.cleared = cleared,
	};

	device_for_each_child(&nvdimm_bus->dev, &ctx,
			nvdimm_clear_badblocks_region);
}

static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
		phys_addr_t phys, u64 cleared)
{
	if (cleared > 0)
		nvdimm_forget_poison(nvdimm_bus, phys, cleared);

	if (cleared > 0 && cleared / 512)
		nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
}
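/*
 * Clearing poison is a two-step command sequence: ND_CMD_ARS_CAP reports
 * the platform's clear_err_unit (the required address/length alignment),
 * then ND_CMD_CLEAR_ERROR clears the aligned range.  The number of bytes
 * actually cleared is reconciled with the bus poison list and per-region
 * badblocks via nvdimm_account_cleared_poison() above.
 */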
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc;
	struct nd_cmd_clear_error clear_err;
	struct nd_cmd_ars_cap ars_cap;
	u32 clear_err_unit, mask;
	int cmd_rc, rc;

	if (!nvdimm_bus)
		return -ENXIO;

	nd_desc = nvdimm_bus->nd_desc;
	/*
	 * if ndctl does not exist, it's PMEM_LEGACY and
	 * we want to just pretend everything is handled.
	 */
	if (!nd_desc->ndctl)
		return len;

	memset(&ars_cap, 0, sizeof(ars_cap));
	ars_cap.address = phys;
	ars_cap.length = len;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, &ars_cap,
			sizeof(ars_cap), &cmd_rc);
	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;
	clear_err_unit = ars_cap.clear_err_unit;
	if (!clear_err_unit || !is_power_of_2(clear_err_unit))
		return -ENXIO;

	mask = clear_err_unit - 1;
	if ((phys | len) & mask)
		return -ENXIO;
	memset(&clear_err, 0, sizeof(clear_err));
	clear_err.address = phys;
	clear_err.length = len;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CLEAR_ERROR, &clear_err,
			sizeof(clear_err), &cmd_rc);
	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;

	nvdimm_account_cleared_poison(nvdimm_bus, phys, clear_err.cleared);

	return clear_err.cleared;
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);

static int nvdimm_bus_match(struct device *dev, struct device_driver *drv);

static struct bus_type nvdimm_bus_type = {
	.name = "nd",
	.uevent = nvdimm_bus_uevent,
	.match = nvdimm_bus_match,
	.probe = nvdimm_bus_probe,
	.remove = nvdimm_bus_remove,
	.shutdown = nvdimm_bus_shutdown,
};

static void nvdimm_bus_release(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus;

	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
	ida_simple_remove(&nd_ida, nvdimm_bus->id);
	kfree(nvdimm_bus);
}

static bool is_nvdimm_bus(struct device *dev)
{
	return dev->release == nvdimm_bus_release;
}

struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
{
	struct device *dev;

	for (dev = nd_dev; dev; dev = dev->parent)
		if (is_nvdimm_bus(dev))
			break;
	dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n");
	if (dev)
		return to_nvdimm_bus(dev);
	return NULL;
}

struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus;

	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
	WARN_ON(!is_nvdimm_bus(dev));
	return nvdimm_bus;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus);

struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct nvdimm_bus *nvdimm_bus;
	int rc;

	nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
	if (!nvdimm_bus)
		return NULL;
	INIT_LIST_HEAD(&nvdimm_bus->list);
	INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
	INIT_LIST_HEAD(&nvdimm_bus->poison_list);
	init_waitqueue_head(&nvdimm_bus->probe_wait);
	nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
	mutex_init(&nvdimm_bus->reconfig_mutex);
	spin_lock_init(&nvdimm_bus->poison_lock);
	if (nvdimm_bus->id < 0) {
		kfree(nvdimm_bus);
		return NULL;
	}
	nvdimm_bus->nd_desc = nd_desc;
	nvdimm_bus->dev.parent = parent;
	nvdimm_bus->dev.release = nvdimm_bus_release;
	nvdimm_bus->dev.groups = nd_desc->attr_groups;
	nvdimm_bus->dev.bus = &nvdimm_bus_type;
	dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
	rc = device_register(&nvdimm_bus->dev);
	if (rc) {
		dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
		goto err;
	}

	return nvdimm_bus;
 err:
	put_device(&nvdimm_bus->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_register);
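/*
 * Illustrative only (not part of this file): a bus provider such as the
 * ACPI NFIT driver embeds a struct nvdimm_bus_descriptor, points ->ndctl
 * at its command handler, and then does roughly:
 *
 *	nvdimm_bus = nvdimm_bus_register(&pdev->dev, &nd_desc);
 *	if (!nvdimm_bus)
 *		return -ENOMEM;
 *	...
 *	nvdimm_bus_unregister(nvdimm_bus);
 */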
void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;
	device_unregister(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);

static int child_unregister(struct device *dev, void *data)
{
	/*
	 * the singular ndctl class device per bus needs to be
	 * "device_destroy"ed, so skip it here
	 *
	 * i.e. remove classless children
	 */
	if (dev->class)
		/* pass */;
	else
		nd_device_unregister(dev, ND_SYNC);
	return 0;
}

static void free_poison_list(struct list_head *poison_list)
{
	struct nd_poison *pl, *next;

	list_for_each_entry_safe(pl, next, poison_list, list) {
		list_del(&pl->list);
		kfree(pl);
	}
	list_del_init(poison_list);
}

static int nd_bus_remove(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	mutex_lock(&nvdimm_bus_list_mutex);
	list_del_init(&nvdimm_bus->list);
	mutex_unlock(&nvdimm_bus_list_mutex);

	nd_synchronize();
	device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);

	spin_lock(&nvdimm_bus->poison_lock);
	free_poison_list(&nvdimm_bus->poison_list);
	spin_unlock(&nvdimm_bus->poison_lock);

	nvdimm_bus_destroy_ndctl(nvdimm_bus);

	return 0;
}

static int nd_bus_probe(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	int rc;

	rc = nvdimm_bus_create_ndctl(nvdimm_bus);
	if (rc)
		return rc;

	mutex_lock(&nvdimm_bus_list_mutex);
	list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
	mutex_unlock(&nvdimm_bus_list_mutex);

	/* enable bus provider attributes to look up their local context */
	dev_set_drvdata(dev, nvdimm_bus->nd_desc);

	return 0;
}

static struct nd_device_driver nd_bus_driver = {
	.probe = nd_bus_probe,
	.remove = nd_bus_remove,
	.drv = {
		.name = "nd_bus",
		.suppress_bind_attrs = true,
		.bus = &nvdimm_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(drv);

	if (is_nvdimm_bus(dev) && nd_drv == &nd_bus_driver)
		return true;

	return !!test_bit(to_nd_device_type(dev), &nd_drv->type);
}

static ASYNC_DOMAIN_EXCLUSIVE(nd_async_domain);

void nd_synchronize(void)
{
	async_synchronize_full_domain(&nd_async_domain);
}
EXPORT_SYMBOL_GPL(nd_synchronize);

static void nd_async_device_register(void *d, async_cookie_t cookie)
{
	struct device *dev = d;

	if (device_add(dev) != 0) {
		dev_err(dev, "%s: failed\n", __func__);
		put_device(dev);
	}
	put_device(dev);
}

static void nd_async_device_unregister(void *d, async_cookie_t cookie)
{
	struct device *dev = d;

	/* flush bus operations before delete */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);

	device_unregister(dev);
	put_device(dev);
}

void __nd_device_register(struct device *dev)
{
	if (!dev)
		return;
	dev->bus = &nvdimm_bus_type;
	get_device(dev);
	async_schedule_domain(nd_async_device_register, dev,
			&nd_async_domain);
}

void nd_device_register(struct device *dev)
{
	device_initialize(dev);
	__nd_device_register(dev);
}
EXPORT_SYMBOL(nd_device_register);
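/*
 * Registrations above are deferred to the exclusive nd_async_domain;
 * nd_synchronize() (or ND_SYNC unregistration below) flushes any device
 * additions still in flight before teardown proceeds.
 */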
void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
{
	switch (mode) {
	case ND_ASYNC:
		get_device(dev);
		async_schedule_domain(nd_async_device_unregister, dev,
				&nd_async_domain);
		break;
	case ND_SYNC:
		nd_synchronize();
		device_unregister(dev);
		break;
	}
}
EXPORT_SYMBOL(nd_device_unregister);

/**
 * __nd_driver_register() - register a region or a namespace driver
 * @nd_drv: driver to register
 * @owner: automatically set by nd_driver_register() macro
 * @mod_name: automatically set by nd_driver_register() macro
 */
int __nd_driver_register(struct nd_device_driver *nd_drv, struct module *owner,
		const char *mod_name)
{
	struct device_driver *drv = &nd_drv->drv;

	if (!nd_drv->type) {
		pr_debug("driver type bitmask not set (%pf)\n",
				__builtin_return_address(0));
		return -EINVAL;
	}

	if (!nd_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", mod_name);
		return -EINVAL;
	}

	drv->bus = &nvdimm_bus_type;
	drv->owner = owner;
	drv->mod_name = mod_name;

	return driver_register(drv);
}
EXPORT_SYMBOL(__nd_driver_register);

int nvdimm_revalidate_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk)->parent;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const char *pol = nd_region->ro ? "only" : "write";

	if (nd_region->ro == get_disk_ro(disk))
		return 0;

	dev_info(dev, "%s read-%s, marking %s read-%s\n",
			dev_name(&nd_region->dev), pol, disk->disk_name, pol);
	set_disk_ro(disk, nd_region->ro);

	return 0;
}
EXPORT_SYMBOL(nvdimm_revalidate_disk);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, ND_DEVICE_MODALIAS_FMT "\n",
			to_nd_device_type(dev));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static struct attribute *nd_device_attributes[] = {
	&dev_attr_modalias.attr,
	&dev_attr_devtype.attr,
	NULL,
};

/**
 * nd_device_attribute_group - generic attributes for all devices on an nd bus
 */
struct attribute_group nd_device_attribute_group = {
	.attrs = nd_device_attributes,
};
EXPORT_SYMBOL_GPL(nd_device_attribute_group);

static ssize_t numa_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static struct attribute *nd_numa_attributes[] = {
	&dev_attr_numa_node.attr,
	NULL,
};

static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	if (!IS_ENABLED(CONFIG_NUMA))
		return 0;

	return a->mode;
}

/**
 * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
 */
struct attribute_group nd_numa_attribute_group = {
	.attrs = nd_numa_attributes,
	.is_visible = nd_numa_attr_visible,
};
EXPORT_SYMBOL_GPL(nd_numa_attribute_group);
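/*
 * Each registered bus gets a single "ndctl%d" character device in nd_class,
 * with the bus id as its minor number; nd_open() stores the minor in
 * file->private_data, which nd_ioctl() later matches against nvdimm_bus->id.
 */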
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
	dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
	struct device *dev;

	dev = device_create(nd_class, &nvdimm_bus->dev, devt, nvdimm_bus,
			"ndctl%d", nvdimm_bus->id);

	if (IS_ERR(dev))
		dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %ld\n",
				nvdimm_bus->id, PTR_ERR(dev));
	return PTR_ERR_OR_ZERO(dev);
}

void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
{
	device_destroy(nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
}

static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
	[ND_CMD_IMPLEMENTED] = { },
	[ND_CMD_SMART] = {
		.out_num = 2,
		.out_sizes = { 4, 128, },
	},
	[ND_CMD_SMART_THRESHOLD] = {
		.out_num = 2,
		.out_sizes = { 4, 8, },
	},
	[ND_CMD_DIMM_FLAGS] = {
		.out_num = 2,
		.out_sizes = { 4, 4 },
	},
	[ND_CMD_GET_CONFIG_SIZE] = {
		.out_num = 3,
		.out_sizes = { 4, 4, 4, },
	},
	[ND_CMD_GET_CONFIG_DATA] = {
		.in_num = 2,
		.in_sizes = { 4, 4, },
		.out_num = 2,
		.out_sizes = { 4, UINT_MAX, },
	},
	[ND_CMD_SET_CONFIG_DATA] = {
		.in_num = 3,
		.in_sizes = { 4, 4, UINT_MAX, },
		.out_num = 1,
		.out_sizes = { 4, },
	},
	[ND_CMD_VENDOR] = {
		.in_num = 3,
		.in_sizes = { 4, 4, UINT_MAX, },
		.out_num = 3,
		.out_sizes = { 4, 4, UINT_MAX, },
	},
	[ND_CMD_CALL] = {
		.in_num = 2,
		.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
		.out_num = 1,
		.out_sizes = { UINT_MAX, },
	},
};

const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd)
{
	if (cmd < ARRAY_SIZE(__nd_cmd_dimm_descs))
		return &__nd_cmd_dimm_descs[cmd];
	return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_dimm_desc);

static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
	[ND_CMD_IMPLEMENTED] = { },
	[ND_CMD_ARS_CAP] = {
		.in_num = 2,
		.in_sizes = { 8, 8, },
		.out_num = 4,
		.out_sizes = { 4, 4, 4, 4, },
	},
	[ND_CMD_ARS_START] = {
		.in_num = 5,
		.in_sizes = { 8, 8, 2, 1, 5, },
		.out_num = 2,
		.out_sizes = { 4, 4, },
	},
	[ND_CMD_ARS_STATUS] = {
		.out_num = 3,
		.out_sizes = { 4, 4, UINT_MAX, },
	},
	[ND_CMD_CLEAR_ERROR] = {
		.in_num = 2,
		.in_sizes = { 8, 8, },
		.out_num = 3,
		.out_sizes = { 4, 4, 8, },
	},
	[ND_CMD_CALL] = {
		.in_num = 2,
		.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
		.out_num = 1,
		.out_sizes = { UINT_MAX, },
	},
};

const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd)
{
	if (cmd < ARRAY_SIZE(__nd_cmd_bus_descs))
		return &__nd_cmd_bus_descs[cmd];
	return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_bus_desc);

u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, void *buf)
{
	if (idx >= desc->in_num)
		return UINT_MAX;

	if (desc->in_sizes[idx] < UINT_MAX)
		return desc->in_sizes[idx];

	if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA && idx == 2) {
		struct nd_cmd_set_config_hdr *hdr = buf;

		return hdr->in_length;
	} else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) {
		struct nd_cmd_vendor_hdr *hdr = buf;

		return hdr->in_length;
	} else if (cmd == ND_CMD_CALL) {
		struct nd_cmd_pkg *pkg = buf;

		return pkg->nd_size_in;
	}

	return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_in_size);
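/*
 * In the descriptor tables above, a UINT_MAX entry is a sentinel for
 * "variable-length field, resolved from another field at runtime"; e.g.
 * for ND_CMD_GET_CONFIG_DATA the output payload length comes from
 * in_field[1] (the requested length), as handled by nd_cmd_out_size() below.
 */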
u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
		const u32 *out_field, unsigned long remainder)
{
	if (idx >= desc->out_num)
		return UINT_MAX;

	if (desc->out_sizes[idx] < UINT_MAX)
		return desc->out_sizes[idx];

	if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && idx == 1)
		return in_field[1];
	else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
		return out_field[1];
	else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2) {
		/*
		 * Per table 9-276 ARS Data in ACPI 6.1, out_field[1] is
		 * "Size of Output Buffer in bytes, including this
		 * field."
		 */
		if (out_field[1] < 4)
			return 0;
		/*
		 * ACPI 6.1 is ambiguous if 'status' is included in the
		 * output size. If we encounter an output size that
		 * overshoots the remainder by 4 bytes, assume it was
		 * including 'status'.
		 */
		if (out_field[1] - 8 == remainder)
			return remainder;
		return out_field[1] - 4;
	} else if (cmd == ND_CMD_CALL) {
		struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;

		return pkg->nd_size_out;
	}

	return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_out_size);

void wait_nvdimm_bus_probe_idle(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	do {
		if (nvdimm_bus->probe_active == 0)
			break;
		nvdimm_bus_unlock(&nvdimm_bus->dev);
		wait_event(nvdimm_bus->probe_wait,
				nvdimm_bus->probe_active == 0);
		nvdimm_bus_lock(&nvdimm_bus->dev);
	} while (true);
}

static int nd_pmem_forget_poison_check(struct device *dev, void *data)
{
	struct nd_cmd_clear_error *clear_err =
		(struct nd_cmd_clear_error *)data;
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	struct nd_namespace_io *nsio;
	resource_size_t offset = 0, end_trunc = 0, start, end, pstart, pend;

	if (nd_dax || !dev->driver)
		return 0;

	start = clear_err->address;
	end = clear_err->address + clear_err->cleared - 1;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return 0;
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	pstart = nsio->res.start + offset;
	pend = nsio->res.end - end_trunc;

	if ((pstart >= start) && (pend <= end))
		return -EBUSY;

	return 0;
}

static int nd_ns_forget_poison_check(struct device *dev, void *data)
{
	return device_for_each_child(dev, data, nd_pmem_forget_poison_check);
}

/* set_config requires an idle interleave set */
static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
		struct nvdimm *nvdimm, unsigned int cmd, void *data)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	/* ask the bus provider if it would like to block this request */
	if (nd_desc->clear_to_send) {
		int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd);

		if (rc)
			return rc;
	}

	/* require clear error to go through the pmem driver */
	if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR)
		return device_for_each_child(&nvdimm_bus->dev, data,
				nd_ns_forget_poison_check);

	if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
		return 0;

	/* prevent label manipulation while the kernel owns label updates */
	wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
	if (atomic_read(&nvdimm->busy))
		return -EBUSY;
	return 0;
}
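/*
 * __nd_ioctl() below marshals a command in two passes: it sizes the input
 * envelope with nd_cmd_in_size() and the output envelope with
 * nd_cmd_out_size(), bounds the total against ND_IOCTL_MAX_BUFLEN, copies
 * the user buffer in, gates the command with nd_cmd_clear_to_send() under
 * the bus lock, and only then calls the provider's ->ndctl().
 */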
static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
		int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	size_t buf_len = 0, in_len = 0, out_len = 0;
	static char out_env[ND_CMD_MAX_ENVELOPE];
	static char in_env[ND_CMD_MAX_ENVELOPE];
	const struct nd_cmd_desc *desc = NULL;
	unsigned int cmd = _IOC_NR(ioctl_cmd);
	void __user *p = (void __user *) arg;
	struct device *dev = &nvdimm_bus->dev;
	struct nd_cmd_pkg pkg;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask;
	void *buf;
	int rc, i, cmd_rc;

	if (nvdimm) {
		desc = nd_cmd_dimm_desc(cmd);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm->cmd_mask;
		dimm_name = dev_name(&nvdimm->dev);
	} else {
		desc = nd_cmd_bus_desc(cmd);
		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dimm_name = "bus";
	}

	if (cmd == ND_CMD_CALL) {
		if (copy_from_user(&pkg, p, sizeof(pkg)))
			return -EFAULT;
	}

	if (!desc || (desc->out_num + desc->in_num == 0) ||
			!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	/* fail write commands (when read-only) */
	if (read_only)
		switch (cmd) {
		case ND_CMD_VENDOR:
		case ND_CMD_SET_CONFIG_DATA:
		case ND_CMD_ARS_START:
		case ND_CMD_CLEAR_ERROR:
		case ND_CMD_CALL:
			dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
					nvdimm ? nvdimm_cmd_name(cmd)
					: nvdimm_bus_cmd_name(cmd));
			return -EPERM;
		default:
			break;
		}

	/* process an input envelope */
	for (i = 0; i < desc->in_num; i++) {
		u32 in_size, copy;

		in_size = nd_cmd_in_size(nvdimm, cmd, desc, i, in_env);
		if (in_size == UINT_MAX) {
			dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			return -ENXIO;
		}
		if (in_len < sizeof(in_env))
			copy = min_t(u32, sizeof(in_env) - in_len, in_size);
		else
			copy = 0;
		if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
			return -EFAULT;
		in_len += in_size;
	}

	if (cmd == ND_CMD_CALL) {
		dev_dbg(dev, "%s:%s, idx: %llu, in: %zu, out: %zu, len %zu\n",
				__func__, dimm_name, pkg.nd_command,
				in_len, out_len, buf_len);

		for (i = 0; i < ARRAY_SIZE(pkg.nd_reserved2); i++)
			if (pkg.nd_reserved2[i])
				return -EINVAL;
	}

	/* process an output envelope */
	for (i = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
				(u32 *) in_env, (u32 *) out_env, 0);
		u32 copy;

		if (out_size == UINT_MAX) {
			dev_dbg(dev, "%s:%s unknown output size cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			return -EFAULT;
		}
		if (out_len < sizeof(out_env))
			copy = min_t(u32, sizeof(out_env) - out_len, out_size);
		else
			copy = 0;
		if (copy && copy_from_user(&out_env[out_len],
					p + in_len + out_len, copy))
			return -EFAULT;
		out_len += out_size;
	}

	buf_len = out_len + in_len;
	if (buf_len > ND_IOCTL_MAX_BUFLEN) {
		dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__,
				dimm_name, cmd_name, buf_len,
				ND_IOCTL_MAX_BUFLEN);
		return -EINVAL;
	}

	buf = vmalloc(buf_len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, p, buf_len)) {
		rc = -EFAULT;
		goto out;
	}

	nvdimm_bus_lock(&nvdimm_bus->dev);
	rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, cmd, buf);
	if (rc)
		goto out_unlock;

	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc);
	if (rc < 0)
		goto out_unlock;

	if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) {
		struct nd_cmd_clear_error *clear_err = buf;

		nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
				clear_err->cleared);
	}
	nvdimm_bus_unlock(&nvdimm_bus->dev);

	if (copy_to_user(p, buf, buf_len))
		rc = -EFAULT;

	vfree(buf);
	return rc;

 out_unlock:
	nvdimm_bus_unlock(&nvdimm_bus->dev);
 out:
	vfree(buf);
	return rc;
}

static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long id = (long) file->private_data;
	int rc = -ENXIO, ro;
	struct nvdimm_bus *nvdimm_bus;

	ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
	mutex_lock(&nvdimm_bus_list_mutex);
	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
		if (nvdimm_bus->id == id) {
			rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
			break;
		}
	}
	mutex_unlock(&nvdimm_bus_list_mutex);

	return rc;
}

static int match_dimm(struct device *dev, void *data)
{
	long id = (long) data;

	if (is_nvdimm(dev)) {
		struct nvdimm *nvdimm = to_nvdimm(dev);

		return nvdimm->id == id;
	}

	return 0;
}

static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int rc = -ENXIO, ro;
	struct nvdimm_bus *nvdimm_bus;

	ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
	mutex_lock(&nvdimm_bus_list_mutex);
	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
		struct device *dev = device_find_child(&nvdimm_bus->dev,
				file->private_data, match_dimm);
		struct nvdimm *nvdimm;

		if (!dev)
			continue;

		nvdimm = to_nvdimm(dev);
		rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
		put_device(dev);
		break;
	}
	mutex_unlock(&nvdimm_bus_list_mutex);

	return rc;
}

static int nd_open(struct inode *inode, struct file *file)
{
	long minor = iminor(inode);

	file->private_data = (void *) minor;
	return 0;
}

static const struct file_operations nvdimm_bus_fops = {
	.owner = THIS_MODULE,
	.open = nd_open,
	.unlocked_ioctl = nd_ioctl,
	.compat_ioctl = nd_ioctl,
	.llseek = noop_llseek,
};

static const struct file_operations nvdimm_fops = {
	.owner = THIS_MODULE,
	.open = nd_open,
	.unlocked_ioctl = nvdimm_ioctl,
	.compat_ioctl = nvdimm_ioctl,
	.llseek = noop_llseek,
};

int __init nvdimm_bus_init(void)
{
	int rc;

	BUILD_BUG_ON(sizeof(struct nd_smart_payload) != 128);
	BUILD_BUG_ON(sizeof(struct nd_smart_threshold_payload) != 8);

	rc = bus_register(&nvdimm_bus_type);
	if (rc)
		return rc;

	rc = register_chrdev(0, "ndctl", &nvdimm_bus_fops);
	if (rc < 0)
		goto err_bus_chrdev;
	nvdimm_bus_major = rc;

	rc = register_chrdev(0, "dimmctl", &nvdimm_fops);
	if (rc < 0)
		goto err_dimm_chrdev;
	nvdimm_major = rc;

	nd_class = class_create(THIS_MODULE, "nd");
	if (IS_ERR(nd_class)) {
		rc = PTR_ERR(nd_class);
		goto err_class;
	}

	rc = driver_register(&nd_bus_driver.drv);
	if (rc)
		goto err_nd_bus;

	return 0;

 err_nd_bus:
	class_destroy(nd_class);
 err_class:
	unregister_chrdev(nvdimm_major, "dimmctl");
 err_dimm_chrdev:
	unregister_chrdev(nvdimm_bus_major, "ndctl");
 err_bus_chrdev:
	bus_unregister(&nvdimm_bus_type);

	return rc;
}

void nvdimm_bus_exit(void)
{
	driver_unregister(&nd_bus_driver.drv);
	class_destroy(nd_class);
	unregister_chrdev(nvdimm_bus_major, "ndctl");
	unregister_chrdev(nvdimm_major, "dimmctl");
	bus_unregister(&nvdimm_bus_type);
	ida_destroy(&nd_ida);
}