1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */ 3 #include <linux/memremap.h> 4 #include <linux/device.h> 5 #include <linux/mutex.h> 6 #include <linux/list.h> 7 #include <linux/slab.h> 8 #include <linux/dax.h> 9 #include <linux/io.h> 10 #include "dax-private.h" 11 #include "bus.h" 12 13 static struct class *dax_class; 14 15 static DEFINE_MUTEX(dax_bus_lock); 16 17 #define DAX_NAME_LEN 30 18 struct dax_id { 19 struct list_head list; 20 char dev_name[DAX_NAME_LEN]; 21 }; 22 23 static int dax_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 24 { 25 /* 26 * We only ever expect to handle device-dax instances, i.e. the 27 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero 28 */ 29 return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0); 30 } 31 32 static struct dax_device_driver *to_dax_drv(struct device_driver *drv) 33 { 34 return container_of(drv, struct dax_device_driver, drv); 35 } 36 37 static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv, 38 const char *dev_name) 39 { 40 struct dax_id *dax_id; 41 42 lockdep_assert_held(&dax_bus_lock); 43 44 list_for_each_entry(dax_id, &dax_drv->ids, list) 45 if (sysfs_streq(dax_id->dev_name, dev_name)) 46 return dax_id; 47 return NULL; 48 } 49 50 static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev) 51 { 52 int match; 53 54 mutex_lock(&dax_bus_lock); 55 match = !!__dax_match_id(dax_drv, dev_name(dev)); 56 mutex_unlock(&dax_bus_lock); 57 58 return match; 59 } 60 61 enum id_action { 62 ID_REMOVE, 63 ID_ADD, 64 }; 65 66 static ssize_t do_id_store(struct device_driver *drv, const char *buf, 67 size_t count, enum id_action action) 68 { 69 struct dax_device_driver *dax_drv = to_dax_drv(drv); 70 unsigned int region_id, id; 71 char devname[DAX_NAME_LEN]; 72 struct dax_id *dax_id; 73 ssize_t rc = count; 74 int fields; 75 76 fields = sscanf(buf, "dax%d.%d", ®ion_id, &id); 77 if (fields != 2) 78 return -EINVAL; 79 sprintf(devname, "dax%d.%d", region_id, id); 80 if (!sysfs_streq(buf, devname)) 81 return -EINVAL; 82 83 mutex_lock(&dax_bus_lock); 84 dax_id = __dax_match_id(dax_drv, buf); 85 if (!dax_id) { 86 if (action == ID_ADD) { 87 dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL); 88 if (dax_id) { 89 strncpy(dax_id->dev_name, buf, DAX_NAME_LEN); 90 list_add(&dax_id->list, &dax_drv->ids); 91 } else 92 rc = -ENOMEM; 93 } else 94 /* nothing to remove */; 95 } else if (action == ID_REMOVE) { 96 list_del(&dax_id->list); 97 kfree(dax_id); 98 } else 99 /* dax_id already added */; 100 mutex_unlock(&dax_bus_lock); 101 102 if (rc < 0) 103 return rc; 104 if (action == ID_ADD) 105 rc = driver_attach(drv); 106 if (rc) 107 return rc; 108 return count; 109 } 110 111 static ssize_t new_id_store(struct device_driver *drv, const char *buf, 112 size_t count) 113 { 114 return do_id_store(drv, buf, count, ID_ADD); 115 } 116 static DRIVER_ATTR_WO(new_id); 117 118 static ssize_t remove_id_store(struct device_driver *drv, const char *buf, 119 size_t count) 120 { 121 return do_id_store(drv, buf, count, ID_REMOVE); 122 } 123 static DRIVER_ATTR_WO(remove_id); 124 125 static struct attribute *dax_drv_attrs[] = { 126 &driver_attr_new_id.attr, 127 &driver_attr_remove_id.attr, 128 NULL, 129 }; 130 ATTRIBUTE_GROUPS(dax_drv); 131 132 static int dax_bus_match(struct device *dev, struct device_driver *drv); 133 134 static bool is_static(struct dax_region *dax_region) 135 { 136 return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0; 137 } 138 139 static u64 dev_dax_size(struct dev_dax *dev_dax) 140 { 141 u64 size = 0; 142 int i; 143 144 device_lock_assert(&dev_dax->dev); 145 146 for (i = 0; i < dev_dax->nr_range; i++) 147 size += range_len(&dev_dax->ranges[i].range); 148 149 return size; 150 } 151 152 static int dax_bus_probe(struct device *dev) 153 { 154 struct dax_device_driver *dax_drv = to_dax_drv(dev->driver); 155 struct dev_dax *dev_dax = to_dev_dax(dev); 156 struct dax_region *dax_region = dev_dax->region; 157 int rc; 158 159 if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0) 160 return -ENXIO; 161 162 rc = dax_drv->probe(dev_dax); 163 164 if (rc || is_static(dax_region)) 165 return rc; 166 167 /* 168 * Track new seed creation only after successful probe of the 169 * previous seed. 170 */ 171 if (dax_region->seed == dev) 172 dax_region->seed = NULL; 173 174 return 0; 175 } 176 177 static int dax_bus_remove(struct device *dev) 178 { 179 struct dax_device_driver *dax_drv = to_dax_drv(dev->driver); 180 struct dev_dax *dev_dax = to_dev_dax(dev); 181 int ret = 0; 182 183 if (dax_drv->remove) 184 ret = dax_drv->remove(dev_dax); 185 186 return ret; 187 } 188 189 static struct bus_type dax_bus_type = { 190 .name = "dax", 191 .uevent = dax_bus_uevent, 192 .match = dax_bus_match, 193 .probe = dax_bus_probe, 194 .remove = dax_bus_remove, 195 .drv_groups = dax_drv_groups, 196 }; 197 198 static int dax_bus_match(struct device *dev, struct device_driver *drv) 199 { 200 struct dax_device_driver *dax_drv = to_dax_drv(drv); 201 202 /* 203 * All but the 'device-dax' driver, which has 'match_always' 204 * set, requires an exact id match. 205 */ 206 if (dax_drv->match_always) 207 return 1; 208 209 return dax_match_id(dax_drv, dev); 210 } 211 212 /* 213 * Rely on the fact that drvdata is set before the attributes are 214 * registered, and that the attributes are unregistered before drvdata 215 * is cleared to assume that drvdata is always valid. 216 */ 217 static ssize_t id_show(struct device *dev, 218 struct device_attribute *attr, char *buf) 219 { 220 struct dax_region *dax_region = dev_get_drvdata(dev); 221 222 return sprintf(buf, "%d\n", dax_region->id); 223 } 224 static DEVICE_ATTR_RO(id); 225 226 static ssize_t region_size_show(struct device *dev, 227 struct device_attribute *attr, char *buf) 228 { 229 struct dax_region *dax_region = dev_get_drvdata(dev); 230 231 return sprintf(buf, "%llu\n", (unsigned long long) 232 resource_size(&dax_region->res)); 233 } 234 static struct device_attribute dev_attr_region_size = __ATTR(size, 0444, 235 region_size_show, NULL); 236 237 static ssize_t region_align_show(struct device *dev, 238 struct device_attribute *attr, char *buf) 239 { 240 struct dax_region *dax_region = dev_get_drvdata(dev); 241 242 return sprintf(buf, "%u\n", dax_region->align); 243 } 244 static struct device_attribute dev_attr_region_align = 245 __ATTR(align, 0400, region_align_show, NULL); 246 247 #define for_each_dax_region_resource(dax_region, res) \ 248 for (res = (dax_region)->res.child; res; res = res->sibling) 249 250 static unsigned long long dax_region_avail_size(struct dax_region *dax_region) 251 { 252 resource_size_t size = resource_size(&dax_region->res); 253 struct resource *res; 254 255 device_lock_assert(dax_region->dev); 256 257 for_each_dax_region_resource(dax_region, res) 258 size -= resource_size(res); 259 return size; 260 } 261 262 static ssize_t available_size_show(struct device *dev, 263 struct device_attribute *attr, char *buf) 264 { 265 struct dax_region *dax_region = dev_get_drvdata(dev); 266 unsigned long long size; 267 268 device_lock(dev); 269 size = dax_region_avail_size(dax_region); 270 device_unlock(dev); 271 272 return sprintf(buf, "%llu\n", size); 273 } 274 static DEVICE_ATTR_RO(available_size); 275 276 static ssize_t seed_show(struct device *dev, 277 struct device_attribute *attr, char *buf) 278 { 279 struct dax_region *dax_region = dev_get_drvdata(dev); 280 struct device *seed; 281 ssize_t rc; 282 283 if (is_static(dax_region)) 284 return -EINVAL; 285 286 device_lock(dev); 287 seed = dax_region->seed; 288 rc = sprintf(buf, "%s\n", seed ? dev_name(seed) : ""); 289 device_unlock(dev); 290 291 return rc; 292 } 293 static DEVICE_ATTR_RO(seed); 294 295 static ssize_t create_show(struct device *dev, 296 struct device_attribute *attr, char *buf) 297 { 298 struct dax_region *dax_region = dev_get_drvdata(dev); 299 struct device *youngest; 300 ssize_t rc; 301 302 if (is_static(dax_region)) 303 return -EINVAL; 304 305 device_lock(dev); 306 youngest = dax_region->youngest; 307 rc = sprintf(buf, "%s\n", youngest ? dev_name(youngest) : ""); 308 device_unlock(dev); 309 310 return rc; 311 } 312 313 static ssize_t create_store(struct device *dev, struct device_attribute *attr, 314 const char *buf, size_t len) 315 { 316 struct dax_region *dax_region = dev_get_drvdata(dev); 317 unsigned long long avail; 318 ssize_t rc; 319 int val; 320 321 if (is_static(dax_region)) 322 return -EINVAL; 323 324 rc = kstrtoint(buf, 0, &val); 325 if (rc) 326 return rc; 327 if (val != 1) 328 return -EINVAL; 329 330 device_lock(dev); 331 avail = dax_region_avail_size(dax_region); 332 if (avail == 0) 333 rc = -ENOSPC; 334 else { 335 struct dev_dax_data data = { 336 .dax_region = dax_region, 337 .size = 0, 338 .id = -1, 339 }; 340 struct dev_dax *dev_dax = devm_create_dev_dax(&data); 341 342 if (IS_ERR(dev_dax)) 343 rc = PTR_ERR(dev_dax); 344 else { 345 /* 346 * In support of crafting multiple new devices 347 * simultaneously multiple seeds can be created, 348 * but only the first one that has not been 349 * successfully bound is tracked as the region 350 * seed. 351 */ 352 if (!dax_region->seed) 353 dax_region->seed = &dev_dax->dev; 354 dax_region->youngest = &dev_dax->dev; 355 rc = len; 356 } 357 } 358 device_unlock(dev); 359 360 return rc; 361 } 362 static DEVICE_ATTR_RW(create); 363 364 void kill_dev_dax(struct dev_dax *dev_dax) 365 { 366 struct dax_device *dax_dev = dev_dax->dax_dev; 367 struct inode *inode = dax_inode(dax_dev); 368 369 kill_dax(dax_dev); 370 unmap_mapping_range(inode->i_mapping, 0, 0, 1); 371 } 372 EXPORT_SYMBOL_GPL(kill_dev_dax); 373 374 static void trim_dev_dax_range(struct dev_dax *dev_dax) 375 { 376 int i = dev_dax->nr_range - 1; 377 struct range *range = &dev_dax->ranges[i].range; 378 struct dax_region *dax_region = dev_dax->region; 379 380 device_lock_assert(dax_region->dev); 381 dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i, 382 (unsigned long long)range->start, 383 (unsigned long long)range->end); 384 385 __release_region(&dax_region->res, range->start, range_len(range)); 386 if (--dev_dax->nr_range == 0) { 387 kfree(dev_dax->ranges); 388 dev_dax->ranges = NULL; 389 } 390 } 391 392 static void free_dev_dax_ranges(struct dev_dax *dev_dax) 393 { 394 while (dev_dax->nr_range) 395 trim_dev_dax_range(dev_dax); 396 } 397 398 static void unregister_dev_dax(void *dev) 399 { 400 struct dev_dax *dev_dax = to_dev_dax(dev); 401 402 dev_dbg(dev, "%s\n", __func__); 403 404 kill_dev_dax(dev_dax); 405 free_dev_dax_ranges(dev_dax); 406 device_del(dev); 407 put_device(dev); 408 } 409 410 /* a return value >= 0 indicates this invocation invalidated the id */ 411 static int __free_dev_dax_id(struct dev_dax *dev_dax) 412 { 413 struct dax_region *dax_region = dev_dax->region; 414 struct device *dev = &dev_dax->dev; 415 int rc = dev_dax->id; 416 417 device_lock_assert(dev); 418 419 if (is_static(dax_region) || dev_dax->id < 0) 420 return -1; 421 ida_free(&dax_region->ida, dev_dax->id); 422 dev_dax->id = -1; 423 return rc; 424 } 425 426 static int free_dev_dax_id(struct dev_dax *dev_dax) 427 { 428 struct device *dev = &dev_dax->dev; 429 int rc; 430 431 device_lock(dev); 432 rc = __free_dev_dax_id(dev_dax); 433 device_unlock(dev); 434 return rc; 435 } 436 437 static ssize_t delete_store(struct device *dev, struct device_attribute *attr, 438 const char *buf, size_t len) 439 { 440 struct dax_region *dax_region = dev_get_drvdata(dev); 441 struct dev_dax *dev_dax; 442 struct device *victim; 443 bool do_del = false; 444 int rc; 445 446 if (is_static(dax_region)) 447 return -EINVAL; 448 449 victim = device_find_child_by_name(dax_region->dev, buf); 450 if (!victim) 451 return -ENXIO; 452 453 device_lock(dev); 454 device_lock(victim); 455 dev_dax = to_dev_dax(victim); 456 if (victim->driver || dev_dax_size(dev_dax)) 457 rc = -EBUSY; 458 else { 459 /* 460 * Invalidate the device so it does not become active 461 * again, but always preserve device-id-0 so that 462 * /sys/bus/dax/ is guaranteed to be populated while any 463 * dax_region is registered. 464 */ 465 if (dev_dax->id > 0) { 466 do_del = __free_dev_dax_id(dev_dax) >= 0; 467 rc = len; 468 if (dax_region->seed == victim) 469 dax_region->seed = NULL; 470 if (dax_region->youngest == victim) 471 dax_region->youngest = NULL; 472 } else 473 rc = -EBUSY; 474 } 475 device_unlock(victim); 476 477 /* won the race to invalidate the device, clean it up */ 478 if (do_del) 479 devm_release_action(dev, unregister_dev_dax, victim); 480 device_unlock(dev); 481 put_device(victim); 482 483 return rc; 484 } 485 static DEVICE_ATTR_WO(delete); 486 487 static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a, 488 int n) 489 { 490 struct device *dev = container_of(kobj, struct device, kobj); 491 struct dax_region *dax_region = dev_get_drvdata(dev); 492 493 if (is_static(dax_region)) 494 if (a == &dev_attr_available_size.attr 495 || a == &dev_attr_create.attr 496 || a == &dev_attr_seed.attr 497 || a == &dev_attr_delete.attr) 498 return 0; 499 return a->mode; 500 } 501 502 static struct attribute *dax_region_attributes[] = { 503 &dev_attr_available_size.attr, 504 &dev_attr_region_size.attr, 505 &dev_attr_region_align.attr, 506 &dev_attr_create.attr, 507 &dev_attr_seed.attr, 508 &dev_attr_delete.attr, 509 &dev_attr_id.attr, 510 NULL, 511 }; 512 513 static const struct attribute_group dax_region_attribute_group = { 514 .name = "dax_region", 515 .attrs = dax_region_attributes, 516 .is_visible = dax_region_visible, 517 }; 518 519 static const struct attribute_group *dax_region_attribute_groups[] = { 520 &dax_region_attribute_group, 521 NULL, 522 }; 523 524 static void dax_region_free(struct kref *kref) 525 { 526 struct dax_region *dax_region; 527 528 dax_region = container_of(kref, struct dax_region, kref); 529 kfree(dax_region); 530 } 531 532 void dax_region_put(struct dax_region *dax_region) 533 { 534 kref_put(&dax_region->kref, dax_region_free); 535 } 536 EXPORT_SYMBOL_GPL(dax_region_put); 537 538 static void dax_region_unregister(void *region) 539 { 540 struct dax_region *dax_region = region; 541 542 sysfs_remove_groups(&dax_region->dev->kobj, 543 dax_region_attribute_groups); 544 dax_region_put(dax_region); 545 } 546 547 struct dax_region *alloc_dax_region(struct device *parent, int region_id, 548 struct range *range, int target_node, unsigned int align, 549 unsigned long flags) 550 { 551 struct dax_region *dax_region; 552 553 /* 554 * The DAX core assumes that it can store its private data in 555 * parent->driver_data. This WARN is a reminder / safeguard for 556 * developers of device-dax drivers. 557 */ 558 if (dev_get_drvdata(parent)) { 559 dev_WARN(parent, "dax core failed to setup private data\n"); 560 return NULL; 561 } 562 563 if (!IS_ALIGNED(range->start, align) 564 || !IS_ALIGNED(range_len(range), align)) 565 return NULL; 566 567 dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL); 568 if (!dax_region) 569 return NULL; 570 571 dev_set_drvdata(parent, dax_region); 572 kref_init(&dax_region->kref); 573 dax_region->id = region_id; 574 dax_region->align = align; 575 dax_region->dev = parent; 576 dax_region->target_node = target_node; 577 ida_init(&dax_region->ida); 578 dax_region->res = (struct resource) { 579 .start = range->start, 580 .end = range->end, 581 .flags = IORESOURCE_MEM | flags, 582 }; 583 584 if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) { 585 kfree(dax_region); 586 return NULL; 587 } 588 589 kref_get(&dax_region->kref); 590 if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region)) 591 return NULL; 592 return dax_region; 593 } 594 EXPORT_SYMBOL_GPL(alloc_dax_region); 595 596 static void dax_mapping_release(struct device *dev) 597 { 598 struct dax_mapping *mapping = to_dax_mapping(dev); 599 struct dev_dax *dev_dax = to_dev_dax(dev->parent); 600 601 ida_free(&dev_dax->ida, mapping->id); 602 kfree(mapping); 603 } 604 605 static void unregister_dax_mapping(void *data) 606 { 607 struct device *dev = data; 608 struct dax_mapping *mapping = to_dax_mapping(dev); 609 struct dev_dax *dev_dax = to_dev_dax(dev->parent); 610 struct dax_region *dax_region = dev_dax->region; 611 612 dev_dbg(dev, "%s\n", __func__); 613 614 device_lock_assert(dax_region->dev); 615 616 dev_dax->ranges[mapping->range_id].mapping = NULL; 617 mapping->range_id = -1; 618 619 device_del(dev); 620 put_device(dev); 621 } 622 623 static struct dev_dax_range *get_dax_range(struct device *dev) 624 { 625 struct dax_mapping *mapping = to_dax_mapping(dev); 626 struct dev_dax *dev_dax = to_dev_dax(dev->parent); 627 struct dax_region *dax_region = dev_dax->region; 628 629 device_lock(dax_region->dev); 630 if (mapping->range_id < 0) { 631 device_unlock(dax_region->dev); 632 return NULL; 633 } 634 635 return &dev_dax->ranges[mapping->range_id]; 636 } 637 638 static void put_dax_range(struct dev_dax_range *dax_range) 639 { 640 struct dax_mapping *mapping = dax_range->mapping; 641 struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent); 642 struct dax_region *dax_region = dev_dax->region; 643 644 device_unlock(dax_region->dev); 645 } 646 647 static ssize_t start_show(struct device *dev, 648 struct device_attribute *attr, char *buf) 649 { 650 struct dev_dax_range *dax_range; 651 ssize_t rc; 652 653 dax_range = get_dax_range(dev); 654 if (!dax_range) 655 return -ENXIO; 656 rc = sprintf(buf, "%#llx\n", dax_range->range.start); 657 put_dax_range(dax_range); 658 659 return rc; 660 } 661 static DEVICE_ATTR(start, 0400, start_show, NULL); 662 663 static ssize_t end_show(struct device *dev, 664 struct device_attribute *attr, char *buf) 665 { 666 struct dev_dax_range *dax_range; 667 ssize_t rc; 668 669 dax_range = get_dax_range(dev); 670 if (!dax_range) 671 return -ENXIO; 672 rc = sprintf(buf, "%#llx\n", dax_range->range.end); 673 put_dax_range(dax_range); 674 675 return rc; 676 } 677 static DEVICE_ATTR(end, 0400, end_show, NULL); 678 679 static ssize_t pgoff_show(struct device *dev, 680 struct device_attribute *attr, char *buf) 681 { 682 struct dev_dax_range *dax_range; 683 ssize_t rc; 684 685 dax_range = get_dax_range(dev); 686 if (!dax_range) 687 return -ENXIO; 688 rc = sprintf(buf, "%#lx\n", dax_range->pgoff); 689 put_dax_range(dax_range); 690 691 return rc; 692 } 693 static DEVICE_ATTR(page_offset, 0400, pgoff_show, NULL); 694 695 static struct attribute *dax_mapping_attributes[] = { 696 &dev_attr_start.attr, 697 &dev_attr_end.attr, 698 &dev_attr_page_offset.attr, 699 NULL, 700 }; 701 702 static const struct attribute_group dax_mapping_attribute_group = { 703 .attrs = dax_mapping_attributes, 704 }; 705 706 static const struct attribute_group *dax_mapping_attribute_groups[] = { 707 &dax_mapping_attribute_group, 708 NULL, 709 }; 710 711 static struct device_type dax_mapping_type = { 712 .release = dax_mapping_release, 713 .groups = dax_mapping_attribute_groups, 714 }; 715 716 static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id) 717 { 718 struct dax_region *dax_region = dev_dax->region; 719 struct dax_mapping *mapping; 720 struct device *dev; 721 int rc; 722 723 device_lock_assert(dax_region->dev); 724 725 if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver, 726 "region disabled\n")) 727 return -ENXIO; 728 729 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); 730 if (!mapping) 731 return -ENOMEM; 732 mapping->range_id = range_id; 733 mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL); 734 if (mapping->id < 0) { 735 kfree(mapping); 736 return -ENOMEM; 737 } 738 dev_dax->ranges[range_id].mapping = mapping; 739 dev = &mapping->dev; 740 device_initialize(dev); 741 dev->parent = &dev_dax->dev; 742 dev->type = &dax_mapping_type; 743 dev_set_name(dev, "mapping%d", mapping->id); 744 rc = device_add(dev); 745 if (rc) { 746 put_device(dev); 747 return rc; 748 } 749 750 rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping, 751 dev); 752 if (rc) 753 return rc; 754 return 0; 755 } 756 757 static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start, 758 resource_size_t size) 759 { 760 struct dax_region *dax_region = dev_dax->region; 761 struct resource *res = &dax_region->res; 762 struct device *dev = &dev_dax->dev; 763 struct dev_dax_range *ranges; 764 unsigned long pgoff = 0; 765 struct resource *alloc; 766 int i, rc; 767 768 device_lock_assert(dax_region->dev); 769 770 /* handle the seed alloc special case */ 771 if (!size) { 772 if (dev_WARN_ONCE(dev, dev_dax->nr_range, 773 "0-size allocation must be first\n")) 774 return -EBUSY; 775 /* nr_range == 0 is elsewhere special cased as 0-size device */ 776 return 0; 777 } 778 779 alloc = __request_region(res, start, size, dev_name(dev), 0); 780 if (!alloc) 781 return -ENOMEM; 782 783 ranges = krealloc(dev_dax->ranges, sizeof(*ranges) 784 * (dev_dax->nr_range + 1), GFP_KERNEL); 785 if (!ranges) { 786 __release_region(res, alloc->start, resource_size(alloc)); 787 return -ENOMEM; 788 } 789 790 for (i = 0; i < dev_dax->nr_range; i++) 791 pgoff += PHYS_PFN(range_len(&ranges[i].range)); 792 dev_dax->ranges = ranges; 793 ranges[dev_dax->nr_range++] = (struct dev_dax_range) { 794 .pgoff = pgoff, 795 .range = { 796 .start = alloc->start, 797 .end = alloc->end, 798 }, 799 }; 800 801 dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1, 802 &alloc->start, &alloc->end); 803 /* 804 * A dev_dax instance must be registered before mapping device 805 * children can be added. Defer to devm_create_dev_dax() to add 806 * the initial mapping device. 807 */ 808 if (!device_is_registered(&dev_dax->dev)) 809 return 0; 810 811 rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1); 812 if (rc) 813 trim_dev_dax_range(dev_dax); 814 815 return rc; 816 } 817 818 static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size) 819 { 820 int last_range = dev_dax->nr_range - 1; 821 struct dev_dax_range *dax_range = &dev_dax->ranges[last_range]; 822 struct dax_region *dax_region = dev_dax->region; 823 bool is_shrink = resource_size(res) > size; 824 struct range *range = &dax_range->range; 825 struct device *dev = &dev_dax->dev; 826 int rc; 827 828 device_lock_assert(dax_region->dev); 829 830 if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n")) 831 return -EINVAL; 832 833 rc = adjust_resource(res, range->start, size); 834 if (rc) 835 return rc; 836 837 *range = (struct range) { 838 .start = range->start, 839 .end = range->start + size - 1, 840 }; 841 842 dev_dbg(dev, "%s range[%d]: %#llx:%#llx\n", is_shrink ? "shrink" : "extend", 843 last_range, (unsigned long long) range->start, 844 (unsigned long long) range->end); 845 846 return 0; 847 } 848 849 static ssize_t size_show(struct device *dev, 850 struct device_attribute *attr, char *buf) 851 { 852 struct dev_dax *dev_dax = to_dev_dax(dev); 853 unsigned long long size; 854 855 device_lock(dev); 856 size = dev_dax_size(dev_dax); 857 device_unlock(dev); 858 859 return sprintf(buf, "%llu\n", size); 860 } 861 862 static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size) 863 { 864 /* 865 * The minimum mapping granularity for a device instance is a 866 * single subsection, unless the arch says otherwise. 867 */ 868 return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align())); 869 } 870 871 static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size) 872 { 873 resource_size_t to_shrink = dev_dax_size(dev_dax) - size; 874 struct dax_region *dax_region = dev_dax->region; 875 struct device *dev = &dev_dax->dev; 876 int i; 877 878 for (i = dev_dax->nr_range - 1; i >= 0; i--) { 879 struct range *range = &dev_dax->ranges[i].range; 880 struct dax_mapping *mapping = dev_dax->ranges[i].mapping; 881 struct resource *adjust = NULL, *res; 882 resource_size_t shrink; 883 884 shrink = min_t(u64, to_shrink, range_len(range)); 885 if (shrink >= range_len(range)) { 886 devm_release_action(dax_region->dev, 887 unregister_dax_mapping, &mapping->dev); 888 trim_dev_dax_range(dev_dax); 889 to_shrink -= shrink; 890 if (!to_shrink) 891 break; 892 continue; 893 } 894 895 for_each_dax_region_resource(dax_region, res) 896 if (strcmp(res->name, dev_name(dev)) == 0 897 && res->start == range->start) { 898 adjust = res; 899 break; 900 } 901 902 if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1, 903 "failed to find matching resource\n")) 904 return -ENXIO; 905 return adjust_dev_dax_range(dev_dax, adjust, range_len(range) 906 - shrink); 907 } 908 return 0; 909 } 910 911 /* 912 * Only allow adjustments that preserve the relative pgoff of existing 913 * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff. 914 */ 915 static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res) 916 { 917 struct dev_dax_range *last; 918 int i; 919 920 if (dev_dax->nr_range == 0) 921 return false; 922 if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0) 923 return false; 924 last = &dev_dax->ranges[dev_dax->nr_range - 1]; 925 if (last->range.start != res->start || last->range.end != res->end) 926 return false; 927 for (i = 0; i < dev_dax->nr_range - 1; i++) { 928 struct dev_dax_range *dax_range = &dev_dax->ranges[i]; 929 930 if (dax_range->pgoff > last->pgoff) 931 return false; 932 } 933 934 return true; 935 } 936 937 static ssize_t dev_dax_resize(struct dax_region *dax_region, 938 struct dev_dax *dev_dax, resource_size_t size) 939 { 940 resource_size_t avail = dax_region_avail_size(dax_region), to_alloc; 941 resource_size_t dev_size = dev_dax_size(dev_dax); 942 struct resource *region_res = &dax_region->res; 943 struct device *dev = &dev_dax->dev; 944 struct resource *res, *first; 945 resource_size_t alloc = 0; 946 int rc; 947 948 if (dev->driver) 949 return -EBUSY; 950 if (size == dev_size) 951 return 0; 952 if (size > dev_size && size - dev_size > avail) 953 return -ENOSPC; 954 if (size < dev_size) 955 return dev_dax_shrink(dev_dax, size); 956 957 to_alloc = size - dev_size; 958 if (dev_WARN_ONCE(dev, !alloc_is_aligned(dev_dax, to_alloc), 959 "resize of %pa misaligned\n", &to_alloc)) 960 return -ENXIO; 961 962 /* 963 * Expand the device into the unused portion of the region. This 964 * may involve adjusting the end of an existing resource, or 965 * allocating a new resource. 966 */ 967 retry: 968 first = region_res->child; 969 if (!first) 970 return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc); 971 972 rc = -ENOSPC; 973 for (res = first; res; res = res->sibling) { 974 struct resource *next = res->sibling; 975 976 /* space at the beginning of the region */ 977 if (res == first && res->start > dax_region->res.start) { 978 alloc = min(res->start - dax_region->res.start, to_alloc); 979 rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc); 980 break; 981 } 982 983 alloc = 0; 984 /* space between allocations */ 985 if (next && next->start > res->end + 1) 986 alloc = min(next->start - (res->end + 1), to_alloc); 987 988 /* space at the end of the region */ 989 if (!alloc && !next && res->end < region_res->end) 990 alloc = min(region_res->end - res->end, to_alloc); 991 992 if (!alloc) 993 continue; 994 995 if (adjust_ok(dev_dax, res)) { 996 rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc); 997 break; 998 } 999 rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc); 1000 break; 1001 } 1002 if (rc) 1003 return rc; 1004 to_alloc -= alloc; 1005 if (to_alloc) 1006 goto retry; 1007 return 0; 1008 } 1009 1010 static ssize_t size_store(struct device *dev, struct device_attribute *attr, 1011 const char *buf, size_t len) 1012 { 1013 ssize_t rc; 1014 unsigned long long val; 1015 struct dev_dax *dev_dax = to_dev_dax(dev); 1016 struct dax_region *dax_region = dev_dax->region; 1017 1018 rc = kstrtoull(buf, 0, &val); 1019 if (rc) 1020 return rc; 1021 1022 if (!alloc_is_aligned(dev_dax, val)) { 1023 dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val); 1024 return -EINVAL; 1025 } 1026 1027 device_lock(dax_region->dev); 1028 if (!dax_region->dev->driver) { 1029 device_unlock(dax_region->dev); 1030 return -ENXIO; 1031 } 1032 device_lock(dev); 1033 rc = dev_dax_resize(dax_region, dev_dax, val); 1034 device_unlock(dev); 1035 device_unlock(dax_region->dev); 1036 1037 return rc == 0 ? len : rc; 1038 } 1039 static DEVICE_ATTR_RW(size); 1040 1041 static ssize_t range_parse(const char *opt, size_t len, struct range *range) 1042 { 1043 unsigned long long addr = 0; 1044 char *start, *end, *str; 1045 ssize_t rc = -EINVAL; 1046 1047 str = kstrdup(opt, GFP_KERNEL); 1048 if (!str) 1049 return rc; 1050 1051 end = str; 1052 start = strsep(&end, "-"); 1053 if (!start || !end) 1054 goto err; 1055 1056 rc = kstrtoull(start, 16, &addr); 1057 if (rc) 1058 goto err; 1059 range->start = addr; 1060 1061 rc = kstrtoull(end, 16, &addr); 1062 if (rc) 1063 goto err; 1064 range->end = addr; 1065 1066 err: 1067 kfree(str); 1068 return rc; 1069 } 1070 1071 static ssize_t mapping_store(struct device *dev, struct device_attribute *attr, 1072 const char *buf, size_t len) 1073 { 1074 struct dev_dax *dev_dax = to_dev_dax(dev); 1075 struct dax_region *dax_region = dev_dax->region; 1076 size_t to_alloc; 1077 struct range r; 1078 ssize_t rc; 1079 1080 rc = range_parse(buf, len, &r); 1081 if (rc) 1082 return rc; 1083 1084 rc = -ENXIO; 1085 device_lock(dax_region->dev); 1086 if (!dax_region->dev->driver) { 1087 device_unlock(dax_region->dev); 1088 return rc; 1089 } 1090 device_lock(dev); 1091 1092 to_alloc = range_len(&r); 1093 if (alloc_is_aligned(dev_dax, to_alloc)) 1094 rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc); 1095 device_unlock(dev); 1096 device_unlock(dax_region->dev); 1097 1098 return rc == 0 ? len : rc; 1099 } 1100 static DEVICE_ATTR_WO(mapping); 1101 1102 static ssize_t align_show(struct device *dev, 1103 struct device_attribute *attr, char *buf) 1104 { 1105 struct dev_dax *dev_dax = to_dev_dax(dev); 1106 1107 return sprintf(buf, "%d\n", dev_dax->align); 1108 } 1109 1110 static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax) 1111 { 1112 struct device *dev = &dev_dax->dev; 1113 int i; 1114 1115 for (i = 0; i < dev_dax->nr_range; i++) { 1116 size_t len = range_len(&dev_dax->ranges[i].range); 1117 1118 if (!alloc_is_aligned(dev_dax, len)) { 1119 dev_dbg(dev, "%s: align %u invalid for range %d\n", 1120 __func__, dev_dax->align, i); 1121 return -EINVAL; 1122 } 1123 } 1124 1125 return 0; 1126 } 1127 1128 static ssize_t align_store(struct device *dev, struct device_attribute *attr, 1129 const char *buf, size_t len) 1130 { 1131 struct dev_dax *dev_dax = to_dev_dax(dev); 1132 struct dax_region *dax_region = dev_dax->region; 1133 unsigned long val, align_save; 1134 ssize_t rc; 1135 1136 rc = kstrtoul(buf, 0, &val); 1137 if (rc) 1138 return -ENXIO; 1139 1140 if (!dax_align_valid(val)) 1141 return -EINVAL; 1142 1143 device_lock(dax_region->dev); 1144 if (!dax_region->dev->driver) { 1145 device_unlock(dax_region->dev); 1146 return -ENXIO; 1147 } 1148 1149 device_lock(dev); 1150 if (dev->driver) { 1151 rc = -EBUSY; 1152 goto out_unlock; 1153 } 1154 1155 align_save = dev_dax->align; 1156 dev_dax->align = val; 1157 rc = dev_dax_validate_align(dev_dax); 1158 if (rc) 1159 dev_dax->align = align_save; 1160 out_unlock: 1161 device_unlock(dev); 1162 device_unlock(dax_region->dev); 1163 return rc == 0 ? len : rc; 1164 } 1165 static DEVICE_ATTR_RW(align); 1166 1167 static int dev_dax_target_node(struct dev_dax *dev_dax) 1168 { 1169 struct dax_region *dax_region = dev_dax->region; 1170 1171 return dax_region->target_node; 1172 } 1173 1174 static ssize_t target_node_show(struct device *dev, 1175 struct device_attribute *attr, char *buf) 1176 { 1177 struct dev_dax *dev_dax = to_dev_dax(dev); 1178 1179 return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax)); 1180 } 1181 static DEVICE_ATTR_RO(target_node); 1182 1183 static ssize_t resource_show(struct device *dev, 1184 struct device_attribute *attr, char *buf) 1185 { 1186 struct dev_dax *dev_dax = to_dev_dax(dev); 1187 struct dax_region *dax_region = dev_dax->region; 1188 unsigned long long start; 1189 1190 if (dev_dax->nr_range < 1) 1191 start = dax_region->res.start; 1192 else 1193 start = dev_dax->ranges[0].range.start; 1194 1195 return sprintf(buf, "%#llx\n", start); 1196 } 1197 static DEVICE_ATTR(resource, 0400, resource_show, NULL); 1198 1199 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, 1200 char *buf) 1201 { 1202 /* 1203 * We only ever expect to handle device-dax instances, i.e. the 1204 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero 1205 */ 1206 return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0); 1207 } 1208 static DEVICE_ATTR_RO(modalias); 1209 1210 static ssize_t numa_node_show(struct device *dev, 1211 struct device_attribute *attr, char *buf) 1212 { 1213 return sprintf(buf, "%d\n", dev_to_node(dev)); 1214 } 1215 static DEVICE_ATTR_RO(numa_node); 1216 1217 static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n) 1218 { 1219 struct device *dev = container_of(kobj, struct device, kobj); 1220 struct dev_dax *dev_dax = to_dev_dax(dev); 1221 struct dax_region *dax_region = dev_dax->region; 1222 1223 if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0) 1224 return 0; 1225 if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA)) 1226 return 0; 1227 if (a == &dev_attr_mapping.attr && is_static(dax_region)) 1228 return 0; 1229 if ((a == &dev_attr_align.attr || 1230 a == &dev_attr_size.attr) && is_static(dax_region)) 1231 return 0444; 1232 return a->mode; 1233 } 1234 1235 static struct attribute *dev_dax_attributes[] = { 1236 &dev_attr_modalias.attr, 1237 &dev_attr_size.attr, 1238 &dev_attr_mapping.attr, 1239 &dev_attr_target_node.attr, 1240 &dev_attr_align.attr, 1241 &dev_attr_resource.attr, 1242 &dev_attr_numa_node.attr, 1243 NULL, 1244 }; 1245 1246 static const struct attribute_group dev_dax_attribute_group = { 1247 .attrs = dev_dax_attributes, 1248 .is_visible = dev_dax_visible, 1249 }; 1250 1251 static const struct attribute_group *dax_attribute_groups[] = { 1252 &dev_dax_attribute_group, 1253 NULL, 1254 }; 1255 1256 static void dev_dax_release(struct device *dev) 1257 { 1258 struct dev_dax *dev_dax = to_dev_dax(dev); 1259 struct dax_region *dax_region = dev_dax->region; 1260 struct dax_device *dax_dev = dev_dax->dax_dev; 1261 1262 put_dax(dax_dev); 1263 free_dev_dax_id(dev_dax); 1264 dax_region_put(dax_region); 1265 kfree(dev_dax->pgmap); 1266 kfree(dev_dax); 1267 } 1268 1269 static const struct device_type dev_dax_type = { 1270 .release = dev_dax_release, 1271 .groups = dax_attribute_groups, 1272 }; 1273 1274 struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) 1275 { 1276 struct dax_region *dax_region = data->dax_region; 1277 struct device *parent = dax_region->dev; 1278 struct dax_device *dax_dev; 1279 struct dev_dax *dev_dax; 1280 struct inode *inode; 1281 struct device *dev; 1282 int rc; 1283 1284 dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL); 1285 if (!dev_dax) 1286 return ERR_PTR(-ENOMEM); 1287 1288 if (is_static(dax_region)) { 1289 if (dev_WARN_ONCE(parent, data->id < 0, 1290 "dynamic id specified to static region\n")) { 1291 rc = -EINVAL; 1292 goto err_id; 1293 } 1294 1295 dev_dax->id = data->id; 1296 } else { 1297 if (dev_WARN_ONCE(parent, data->id >= 0, 1298 "static id specified to dynamic region\n")) { 1299 rc = -EINVAL; 1300 goto err_id; 1301 } 1302 1303 rc = ida_alloc(&dax_region->ida, GFP_KERNEL); 1304 if (rc < 0) 1305 goto err_id; 1306 dev_dax->id = rc; 1307 } 1308 1309 dev_dax->region = dax_region; 1310 dev = &dev_dax->dev; 1311 device_initialize(dev); 1312 dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id); 1313 1314 rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size); 1315 if (rc) 1316 goto err_range; 1317 1318 if (data->pgmap) { 1319 dev_WARN_ONCE(parent, !is_static(dax_region), 1320 "custom dev_pagemap requires a static dax_region\n"); 1321 1322 dev_dax->pgmap = kmemdup(data->pgmap, 1323 sizeof(struct dev_pagemap), GFP_KERNEL); 1324 if (!dev_dax->pgmap) { 1325 rc = -ENOMEM; 1326 goto err_pgmap; 1327 } 1328 } 1329 1330 /* 1331 * No 'host' or dax_operations since there is no access to this 1332 * device outside of mmap of the resulting character device. 1333 */ 1334 dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC); 1335 if (IS_ERR(dax_dev)) { 1336 rc = PTR_ERR(dax_dev); 1337 goto err_alloc_dax; 1338 } 1339 1340 /* a device_dax instance is dead while the driver is not attached */ 1341 kill_dax(dax_dev); 1342 1343 dev_dax->dax_dev = dax_dev; 1344 dev_dax->target_node = dax_region->target_node; 1345 dev_dax->align = dax_region->align; 1346 ida_init(&dev_dax->ida); 1347 kref_get(&dax_region->kref); 1348 1349 inode = dax_inode(dax_dev); 1350 dev->devt = inode->i_rdev; 1351 if (data->subsys == DEV_DAX_BUS) 1352 dev->bus = &dax_bus_type; 1353 else 1354 dev->class = dax_class; 1355 dev->parent = parent; 1356 dev->type = &dev_dax_type; 1357 1358 rc = device_add(dev); 1359 if (rc) { 1360 kill_dev_dax(dev_dax); 1361 put_device(dev); 1362 return ERR_PTR(rc); 1363 } 1364 1365 rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev); 1366 if (rc) 1367 return ERR_PTR(rc); 1368 1369 /* register mapping device for the initial allocation range */ 1370 if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) { 1371 rc = devm_register_dax_mapping(dev_dax, 0); 1372 if (rc) 1373 return ERR_PTR(rc); 1374 } 1375 1376 return dev_dax; 1377 1378 err_alloc_dax: 1379 kfree(dev_dax->pgmap); 1380 err_pgmap: 1381 free_dev_dax_ranges(dev_dax); 1382 err_range: 1383 free_dev_dax_id(dev_dax); 1384 err_id: 1385 kfree(dev_dax); 1386 1387 return ERR_PTR(rc); 1388 } 1389 EXPORT_SYMBOL_GPL(devm_create_dev_dax); 1390 1391 static int match_always_count; 1392 1393 int __dax_driver_register(struct dax_device_driver *dax_drv, 1394 struct module *module, const char *mod_name) 1395 { 1396 struct device_driver *drv = &dax_drv->drv; 1397 int rc = 0; 1398 1399 /* 1400 * dax_bus_probe() calls dax_drv->probe() unconditionally. 1401 * So better be safe than sorry and ensure it is provided. 1402 */ 1403 if (!dax_drv->probe) 1404 return -EINVAL; 1405 1406 INIT_LIST_HEAD(&dax_drv->ids); 1407 drv->owner = module; 1408 drv->name = mod_name; 1409 drv->mod_name = mod_name; 1410 drv->bus = &dax_bus_type; 1411 1412 /* there can only be one default driver */ 1413 mutex_lock(&dax_bus_lock); 1414 match_always_count += dax_drv->match_always; 1415 if (match_always_count > 1) { 1416 match_always_count--; 1417 WARN_ON(1); 1418 rc = -EINVAL; 1419 } 1420 mutex_unlock(&dax_bus_lock); 1421 if (rc) 1422 return rc; 1423 return driver_register(drv); 1424 } 1425 EXPORT_SYMBOL_GPL(__dax_driver_register); 1426 1427 void dax_driver_unregister(struct dax_device_driver *dax_drv) 1428 { 1429 struct device_driver *drv = &dax_drv->drv; 1430 struct dax_id *dax_id, *_id; 1431 1432 mutex_lock(&dax_bus_lock); 1433 match_always_count -= dax_drv->match_always; 1434 list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) { 1435 list_del(&dax_id->list); 1436 kfree(dax_id); 1437 } 1438 mutex_unlock(&dax_bus_lock); 1439 driver_unregister(drv); 1440 } 1441 EXPORT_SYMBOL_GPL(dax_driver_unregister); 1442 1443 int __init dax_bus_init(void) 1444 { 1445 int rc; 1446 1447 if (IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)) { 1448 dax_class = class_create(THIS_MODULE, "dax"); 1449 if (IS_ERR(dax_class)) 1450 return PTR_ERR(dax_class); 1451 } 1452 1453 rc = bus_register(&dax_bus_type); 1454 if (rc) 1455 class_destroy(dax_class); 1456 return rc; 1457 } 1458 1459 void __exit dax_bus_exit(void) 1460 { 1461 bus_unregister(&dax_bus_type); 1462 class_destroy(dax_class); 1463 } 1464