// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/io.h>
#include "dax-private.h"
#include "bus.h"

static DEFINE_MUTEX(dax_bus_lock);

#define DAX_NAME_LEN 30
struct dax_id {
	struct list_head list;
	char dev_name[DAX_NAME_LEN];
};

static int dax_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0);
}

static struct dax_device_driver *to_dax_drv(struct device_driver *drv)
{
	return container_of(drv, struct dax_device_driver, drv);
}

static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
		const char *dev_name)
{
	struct dax_id *dax_id;

	lockdep_assert_held(&dax_bus_lock);

	list_for_each_entry(dax_id, &dax_drv->ids, list)
		if (sysfs_streq(dax_id->dev_name, dev_name))
			return dax_id;
	return NULL;
}

static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
{
	int match;

	mutex_lock(&dax_bus_lock);
	match = !!__dax_match_id(dax_drv, dev_name(dev));
	mutex_unlock(&dax_bus_lock);

	return match;
}

static int dax_match_type(struct dax_device_driver *dax_drv, struct device *dev)
{
	enum dax_driver_type type = DAXDRV_DEVICE_TYPE;
	struct dev_dax *dev_dax = to_dev_dax(dev);

	if (dev_dax->region->res.flags & IORESOURCE_DAX_KMEM)
		type = DAXDRV_KMEM_TYPE;

	if (dax_drv->type == type)
		return 1;

	/* default to device mode if dax_kmem is disabled */
	if (dax_drv->type == DAXDRV_DEVICE_TYPE &&
	    !IS_ENABLED(CONFIG_DEV_DAX_KMEM))
		return 1;

	return 0;
}

enum id_action {
	ID_REMOVE,
	ID_ADD,
};

static ssize_t do_id_store(struct device_driver *drv, const char *buf,
		size_t count, enum id_action action)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);
	unsigned int region_id, id;
	char devname[DAX_NAME_LEN];
	struct dax_id *dax_id;
	ssize_t rc = count;
	int fields;

	fields = sscanf(buf, "dax%d.%d", &region_id, &id);
	if (fields != 2)
		return -EINVAL;
	sprintf(devname, "dax%d.%d", region_id, id);
	if (!sysfs_streq(buf, devname))
		return -EINVAL;

	mutex_lock(&dax_bus_lock);
	dax_id = __dax_match_id(dax_drv, buf);
	if (!dax_id) {
		if (action == ID_ADD) {
			dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL);
			if (dax_id) {
				strncpy(dax_id->dev_name, buf, DAX_NAME_LEN);
				list_add(&dax_id->list, &dax_drv->ids);
			} else
				rc = -ENOMEM;
		}
	} else if (action == ID_REMOVE) {
		list_del(&dax_id->list);
		kfree(dax_id);
	}
	mutex_unlock(&dax_bus_lock);

	if (rc < 0)
		return rc;
	if (action == ID_ADD)
		rc = driver_attach(drv);
	if (rc)
		return rc;
	return count;
}

static ssize_t new_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_ADD);
}
static DRIVER_ATTR_WO(new_id);

static ssize_t remove_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_REMOVE);
}
static DRIVER_ATTR_WO(remove_id);
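
/*
 * Example (illustrative): the new_id/remove_id attributes are what allow
 * userspace to steer a device-dax instance to a different dax driver, e.g.
 * handing dax0.0 to the kmem driver:
 *
 *	echo dax0.0 > /sys/bus/dax/drivers/device_dax/unbind
 *	echo dax0.0 > /sys/bus/dax/drivers/kmem/new_id
 *
 * do_id_store() validates the "daxX.Y" name, records it on the driver's ids
 * list, and re-runs matching via driver_attach().
 */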

static struct attribute *dax_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dax_drv);

static int dax_bus_match(struct device *dev, struct device_driver *drv);

/*
 * Static dax regions are regions created by an external subsystem, such as
 * nvdimm, where a single range is assigned. Their boundaries are defined by
 * the external subsystem and are usually limited to one physical memory
 * range. For example, for PMEM it is usually defined by NVDIMM Namespace
 * boundaries (i.e. a single contiguous range).
 *
 * On dynamic dax regions, the assigned region can be partitioned by the dax
 * core into multiple subdivisions. A subdivision is represented by one
 * /dev/daxN.M device composed of one or more potentially discontiguous ranges.
 *
 * When allocating a dax region, drivers must set whether it's static
 * (IORESOURCE_DAX_STATIC). On static dax devices, the @pgmap is pre-assigned
 * to dax core when calling devm_create_dev_dax(), whereas in dynamic dax
 * devices it is NULL but afterwards allocated by dax core on device ->probe().
 * Care is needed to make sure that dynamic dax devices are torn down with a
 * cleared @pgmap field (see kill_dev_dax()).
 */
static bool is_static(struct dax_region *dax_region)
{
	return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
}

bool static_dev_dax(struct dev_dax *dev_dax)
{
	return is_static(dev_dax->region);
}
EXPORT_SYMBOL_GPL(static_dev_dax);

static u64 dev_dax_size(struct dev_dax *dev_dax)
{
	u64 size = 0;
	int i;

	device_lock_assert(&dev_dax->dev);

	for (i = 0; i < dev_dax->nr_range; i++)
		size += range_len(&dev_dax->ranges[i].range);

	return size;
}

static int dax_bus_probe(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	int rc;

	if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0)
		return -ENXIO;

	rc = dax_drv->probe(dev_dax);

	if (rc || is_static(dax_region))
		return rc;

	/*
	 * Track new seed creation only after successful probe of the
	 * previous seed.
	 */
	if (dax_region->seed == dev)
		dax_region->seed = NULL;

	return 0;
}

static void dax_bus_remove(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);

	if (dax_drv->remove)
		dax_drv->remove(dev_dax);
}

static struct bus_type dax_bus_type = {
	.name = "dax",
	.uevent = dax_bus_uevent,
	.match = dax_bus_match,
	.probe = dax_bus_probe,
	.remove = dax_bus_remove,
	.drv_groups = dax_drv_groups,
};

static int dax_bus_match(struct device *dev, struct device_driver *drv)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);

	if (dax_match_id(dax_drv, dev))
		return 1;
	return dax_match_type(dax_drv, dev);
}

/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%llu\n", (unsigned long long)
			resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t region_align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", dax_region->align);
}
static struct device_attribute dev_attr_region_align =
		__ATTR(align, 0400, region_align_show, NULL);

#define for_each_dax_region_resource(dax_region, res) \
	for (res = (dax_region)->res.child; res; res = res->sibling)

static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
{
	resource_size_t size = resource_size(&dax_region->res);
	struct resource *res;

	device_lock_assert(dax_region->dev);

	for_each_dax_region_resource(dax_region, res)
		size -= resource_size(res);
	return size;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long size;

	device_lock(dev);
	size = dax_region_avail_size(dax_region);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *seed;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	device_lock(dev);
	seed = dax_region->seed;
	rc = sprintf(buf, "%s\n", seed ? dev_name(seed) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(seed);

static ssize_t create_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *youngest;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	device_lock(dev);
	youngest = dax_region->youngest;
	rc = sprintf(buf, "%s\n", youngest ? dev_name(youngest) : "");
	device_unlock(dev);

	return rc;
}

static ssize_t create_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long avail;
	ssize_t rc;
	int val;

	if (is_static(dax_region))
		return -EINVAL;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	avail = dax_region_avail_size(dax_region);
	if (avail == 0)
		rc = -ENOSPC;
	else {
		struct dev_dax_data data = {
			.dax_region = dax_region,
			.size = 0,
			.id = -1,
		};
		struct dev_dax *dev_dax = devm_create_dev_dax(&data);

		if (IS_ERR(dev_dax))
			rc = PTR_ERR(dev_dax);
		else {
			/*
			 * In support of crafting multiple new devices
			 * simultaneously, multiple seeds can be created,
			 * but only the first one that has not been
			 * successfully bound is tracked as the region
			 * seed.
			 */
			if (!dax_region->seed)
				dax_region->seed = &dev_dax->dev;
			dax_region->youngest = &dev_dax->dev;
			rc = len;
		}
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(create);

void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);

	/*
	 * Dynamic dax regions have the pgmap allocated via devm_kzalloc()
	 * and thus freed by devm. Clear the pgmap to not have stale pgmap
	 * ranges on probe() from previous reconfigurations of region devices.
	 */
	if (!static_dev_dax(dev_dax))
		dev_dax->pgmap = NULL;
}
EXPORT_SYMBOL_GPL(kill_dev_dax);

static void trim_dev_dax_range(struct dev_dax *dev_dax)
{
	int i = dev_dax->nr_range - 1;
	struct range *range = &dev_dax->ranges[i].range;
	struct dax_region *dax_region = dev_dax->region;

	device_lock_assert(dax_region->dev);
	dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
		(unsigned long long)range->start,
		(unsigned long long)range->end);

	__release_region(&dax_region->res, range->start, range_len(range));
	if (--dev_dax->nr_range == 0) {
		kfree(dev_dax->ranges);
		dev_dax->ranges = NULL;
	}
}

static void free_dev_dax_ranges(struct dev_dax *dev_dax)
{
	while (dev_dax->nr_range)
		trim_dev_dax_range(dev_dax);
}

static void unregister_dev_dax(void *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	dev_dbg(dev, "%s\n", __func__);

	kill_dev_dax(dev_dax);
	device_del(dev);
	free_dev_dax_ranges(dev_dax);
	put_device(dev);
}

/* a return value >= 0 indicates this invocation invalidated the id */
static int __free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	int rc = dev_dax->id;

	device_lock_assert(dev);

	if (is_static(dax_region) || dev_dax->id < 0)
		return -1;
	ida_free(&dax_region->ida, dev_dax->id);
	dev_dax->id = -1;
	return rc;
}

static int free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int rc;

	device_lock(dev);
	rc = __free_dev_dax_id(dev_dax);
	device_unlock(dev);
	return rc;
}
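
/*
 * Illustrative flow for a dynamic (non-static) region: userspace drives the
 * device lifecycle through the parent device's "dax_region" sysfs group and
 * the per-device attributes, roughly (paths abbreviated):
 *
 *	echo 1 > .../dax_region/create			spawn a 0-sized seed device
 *	echo $((2 << 30)) > .../dax0.1/size		allocate capacity to it
 *	echo dax0.1 > .../dax_region/delete		tear it back down
 *
 * create_store() above instantiates the seed, size_store() further below
 * grows or shrinks it, and delete_store() below releases its id and
 * unregisters the device.
 */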

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct dev_dax *dev_dax;
	struct device *victim;
	bool do_del = false;
	int rc;

	if (is_static(dax_region))
		return -EINVAL;

	victim = device_find_child_by_name(dax_region->dev, buf);
	if (!victim)
		return -ENXIO;

	device_lock(dev);
	device_lock(victim);
	dev_dax = to_dev_dax(victim);
	if (victim->driver || dev_dax_size(dev_dax))
		rc = -EBUSY;
	else {
		/*
		 * Invalidate the device so it does not become active
		 * again, but always preserve device-id-0 so that
		 * /sys/bus/dax/ is guaranteed to be populated while any
		 * dax_region is registered.
		 */
		if (dev_dax->id > 0) {
			do_del = __free_dev_dax_id(dev_dax) >= 0;
			rc = len;
			if (dax_region->seed == victim)
				dax_region->seed = NULL;
			if (dax_region->youngest == victim)
				dax_region->youngest = NULL;
		} else
			rc = -EBUSY;
	}
	device_unlock(victim);

	/* won the race to invalidate the device, clean it up */
	if (do_del)
		devm_release_action(dev, unregister_dev_dax, victim);
	device_unlock(dev);
	put_device(victim);

	return rc;
}
static DEVICE_ATTR_WO(delete);

static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dax_region *dax_region = dev_get_drvdata(dev);

	if (is_static(dax_region))
		if (a == &dev_attr_available_size.attr
				|| a == &dev_attr_create.attr
				|| a == &dev_attr_seed.attr
				|| a == &dev_attr_delete.attr)
			return 0;
	return a->mode;
}

static struct attribute *dax_region_attributes[] = {
	&dev_attr_available_size.attr,
	&dev_attr_region_size.attr,
	&dev_attr_region_align.attr,
	&dev_attr_create.attr,
	&dev_attr_seed.attr,
	&dev_attr_delete.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
	.is_visible = dax_region_visible,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}
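
/*
 * Illustrative caller sketch (assumed usage, not defined in this file): a
 * bus driver such as dax_pmem typically creates a static region spanning a
 * single range and one device covering all of it:
 *
 *	dax_region = alloc_dax_region(dev, region_id, &range, target_node,
 *			align, IORESOURCE_DAX_STATIC);
 *	data = (struct dev_dax_data) {
 *		.dax_region = dax_region,
 *		.id = id,
 *		.pgmap = &pgmap,
 *		.size = range_len(&range),
 *	};
 *	dev_dax = devm_create_dev_dax(&data);
 */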

struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct range *range, int target_node, unsigned int align,
		unsigned long flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(range->start, align)
			|| !IS_ALIGNED(range_len(range), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->target_node = target_node;
	ida_init(&dax_region->ida);
	dax_region->res = (struct resource) {
		.start = range->start,
		.end = range->end,
		.flags = IORESOURCE_MEM | flags,
	};

	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	kref_get(&dax_region->kref);
	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static void dax_mapping_release(struct device *dev)
{
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);

	ida_free(&dev_dax->ida, mapping->id);
	kfree(mapping);
}

static void unregister_dax_mapping(void *data)
{
	struct device *dev = data;
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);
	struct dax_region *dax_region = dev_dax->region;

	dev_dbg(dev, "%s\n", __func__);

	device_lock_assert(dax_region->dev);

	dev_dax->ranges[mapping->range_id].mapping = NULL;
	mapping->range_id = -1;

	device_del(dev);
	put_device(dev);
}

static struct dev_dax_range *get_dax_range(struct device *dev)
{
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);
	struct dax_region *dax_region = dev_dax->region;

	device_lock(dax_region->dev);
	if (mapping->range_id < 0) {
		device_unlock(dax_region->dev);
		return NULL;
	}

	return &dev_dax->ranges[mapping->range_id];
}

static void put_dax_range(struct dev_dax_range *dax_range)
{
	struct dax_mapping *mapping = dax_range->mapping;
	struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent);
	struct dax_region *dax_region = dev_dax->region;

	device_unlock(dax_region->dev);
}

static ssize_t start_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#llx\n", dax_range->range.start);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(start, 0400, start_show, NULL);

static ssize_t end_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#llx\n", dax_range->range.end);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(end, 0400, end_show, NULL);

static ssize_t pgoff_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#lx\n", dax_range->pgoff);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(page_offset, 0400, pgoff_show, NULL);

static struct attribute *dax_mapping_attributes[] = {
	&dev_attr_start.attr,
	&dev_attr_end.attr,
	&dev_attr_page_offset.attr,
	NULL,
};

static const struct attribute_group dax_mapping_attribute_group = {
	.attrs = dax_mapping_attributes,
};

static const struct attribute_group *dax_mapping_attribute_groups[] = {
	&dax_mapping_attribute_group,
	NULL,
};

static struct device_type dax_mapping_type = {
	.release = dax_mapping_release,
	.groups = dax_mapping_attribute_groups,
};

static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
{
	struct dax_region *dax_region = dev_dax->region;
	struct dax_mapping *mapping;
	struct device *dev;
	int rc;

	device_lock_assert(dax_region->dev);

	if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
				"region disabled\n"))
		return -ENXIO;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;
	mapping->range_id = range_id;
	mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
	if (mapping->id < 0) {
		kfree(mapping);
		return -ENOMEM;
	}
	dev_dax->ranges[range_id].mapping = mapping;
	dev = &mapping->dev;
	device_initialize(dev);
	dev->parent = &dev_dax->dev;
	dev->type = &dax_mapping_type;
	dev_set_name(dev, "mapping%d", mapping->id);
	rc = device_add(dev);
	if (rc) {
		put_device(dev);
		return rc;
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping,
			dev);
	if (rc)
		return rc;
	return 0;
}

static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
		resource_size_t size)
{
	struct dax_region *dax_region = dev_dax->region;
	struct resource *res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct dev_dax_range *ranges;
	unsigned long pgoff = 0;
	struct resource *alloc;
	int i, rc;

	device_lock_assert(dax_region->dev);

	/* handle the seed alloc special case */
	if (!size) {
		if (dev_WARN_ONCE(dev, dev_dax->nr_range,
				"0-size allocation must be first\n"))
			return -EBUSY;
		/* nr_range == 0 is elsewhere special cased as 0-size device */
		return 0;
	}

	alloc = __request_region(res, start, size, dev_name(dev), 0);
	if (!alloc)
		return -ENOMEM;

	ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
			* (dev_dax->nr_range + 1), GFP_KERNEL);
	if (!ranges) {
		__release_region(res, alloc->start, resource_size(alloc));
		return -ENOMEM;
	}

	for (i = 0; i < dev_dax->nr_range; i++)
		pgoff += PHYS_PFN(range_len(&ranges[i].range));
	dev_dax->ranges = ranges;
	ranges[dev_dax->nr_range++] = (struct dev_dax_range) {
		.pgoff = pgoff,
		.range = {
			.start = alloc->start,
			.end = alloc->end,
		},
	};

	dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
		&alloc->start, &alloc->end);
	/*
	 * A dev_dax instance must be registered before mapping device
	 * children can be added. Defer to devm_create_dev_dax() to add
	 * the initial mapping device.
	 */
	if (!device_is_registered(&dev_dax->dev))
		return 0;

	rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
	if (rc)
		trim_dev_dax_range(dev_dax);

	return rc;
}

static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
{
	int last_range = dev_dax->nr_range - 1;
	struct dev_dax_range *dax_range = &dev_dax->ranges[last_range];
	struct dax_region *dax_region = dev_dax->region;
	bool is_shrink = resource_size(res) > size;
	struct range *range = &dax_range->range;
	struct device *dev = &dev_dax->dev;
	int rc;

	device_lock_assert(dax_region->dev);

	if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
		return -EINVAL;

	rc = adjust_resource(res, range->start, size);
	if (rc)
		return rc;

	*range = (struct range) {
		.start = range->start,
		.end = range->start + size - 1,
	};

	dev_dbg(dev, "%s range[%d]: %#llx:%#llx\n", is_shrink ? "shrink" : "extend",
		last_range, (unsigned long long) range->start,
		(unsigned long long) range->end);

	return 0;
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	unsigned long long size;

	device_lock(dev);
	size = dev_dax_size(dev_dax);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", size);
}

static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
{
	/*
	 * The minimum mapping granularity for a device instance is a
	 * single subsection, unless the arch says otherwise.
	 */
	return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align()));
}

static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t to_shrink = dev_dax_size(dev_dax) - size;
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = dev_dax->nr_range - 1; i >= 0; i--) {
		struct range *range = &dev_dax->ranges[i].range;
		struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
		struct resource *adjust = NULL, *res;
		resource_size_t shrink;

		shrink = min_t(u64, to_shrink, range_len(range));
		if (shrink >= range_len(range)) {
			devm_release_action(dax_region->dev,
					unregister_dax_mapping, &mapping->dev);
			trim_dev_dax_range(dev_dax);
			to_shrink -= shrink;
			if (!to_shrink)
				break;
			continue;
		}

		for_each_dax_region_resource(dax_region, res)
			if (strcmp(res->name, dev_name(dev)) == 0
					&& res->start == range->start) {
				adjust = res;
				break;
			}

		if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1,
				"failed to find matching resource\n"))
			return -ENXIO;
		return adjust_dev_dax_range(dev_dax, adjust, range_len(range)
				- shrink);
	}
	return 0;
}

/*
 * Only allow adjustments that preserve the relative pgoff of existing
 * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
 */
static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res)
{
	struct dev_dax_range *last;
	int i;

	if (dev_dax->nr_range == 0)
		return false;
	if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0)
		return false;
	last = &dev_dax->ranges[dev_dax->nr_range - 1];
	if (last->range.start != res->start || last->range.end != res->end)
		return false;
	for (i = 0; i < dev_dax->nr_range - 1; i++) {
		struct dev_dax_range *dax_range = &dev_dax->ranges[i];

		if (dax_range->pgoff > last->pgoff)
			return false;
	}

	return true;
}

static ssize_t dev_dax_resize(struct dax_region *dax_region,
		struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
	resource_size_t dev_size = dev_dax_size(dev_dax);
	struct resource *region_res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct resource *res, *first;
	resource_size_t alloc = 0;
	int rc;

	if (dev->driver)
		return -EBUSY;
	if (size == dev_size)
		return 0;
	if (size > dev_size && size - dev_size > avail)
		return -ENOSPC;
	if (size < dev_size)
		return dev_dax_shrink(dev_dax, size);

	to_alloc = size - dev_size;
	if (dev_WARN_ONCE(dev, !alloc_is_aligned(dev_dax, to_alloc),
			"resize of %pa misaligned\n", &to_alloc))
		return -ENXIO;

	/*
	 * Expand the device into the unused portion of the region. This
	 * may involve adjusting the end of an existing resource, or
	 * allocating a new resource.
	 */
retry:
	first = region_res->child;
	if (!first)
		return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);

	rc = -ENOSPC;
	for (res = first; res; res = res->sibling) {
		struct resource *next = res->sibling;

		/* space at the beginning of the region */
		if (res == first && res->start > dax_region->res.start) {
			alloc = min(res->start - dax_region->res.start, to_alloc);
			rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc);
			break;
		}

		alloc = 0;
		/* space between allocations */
		if (next && next->start > res->end + 1)
			alloc = min(next->start - (res->end + 1), to_alloc);

		/* space at the end of the region */
		if (!alloc && !next && res->end < region_res->end)
			alloc = min(region_res->end - res->end, to_alloc);

		if (!alloc)
			continue;

		if (adjust_ok(dev_dax, res)) {
			rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc);
			break;
		}
		rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc);
		break;
	}
	if (rc)
		return rc;
	to_alloc -= alloc;
	if (to_alloc)
		goto retry;
	return 0;
}
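
/*
 * Illustrative note: writes to the "size" attribute below must be multiples
 * of max(dev_dax->align, memremap_compat_align()), i.e. at least subsection
 * aligned (2 MiB on most architectures, larger where the arch overrides it),
 * e.g.:
 *
 *	echo $((2 << 30)) > /sys/bus/dax/devices/dax0.1/size
 *
 * Misaligned values are rejected with -EINVAL by the alloc_is_aligned()
 * check in size_store().
 */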

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	ssize_t rc;
	unsigned long long val;
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	if (!alloc_is_aligned(dev_dax, val)) {
		dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val);
		return -EINVAL;
	}

	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return -ENXIO;
	}
	device_lock(dev);
	rc = dev_dax_resize(dax_region, dev_dax, val);
	device_unlock(dev);
	device_unlock(dax_region->dev);

	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(size);

static ssize_t range_parse(const char *opt, size_t len, struct range *range)
{
	unsigned long long addr = 0;
	char *start, *end, *str;
	ssize_t rc = -EINVAL;

	str = kstrdup(opt, GFP_KERNEL);
	if (!str)
		return rc;

	end = str;
	start = strsep(&end, "-");
	if (!start || !end)
		goto err;

	rc = kstrtoull(start, 16, &addr);
	if (rc)
		goto err;
	range->start = addr;

	rc = kstrtoull(end, 16, &addr);
	if (rc)
		goto err;
	range->end = addr;

err:
	kfree(str);
	return rc;
}
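
/*
 * Illustrative note: the "mapping" attribute below accepts a hexadecimal
 * "<start>-<end>" physical address range, parsed by range_parse() above,
 * e.g.:
 *
 *	echo 0x200000000-0x23fffffff > /sys/bus/dax/devices/dax0.1/mapping
 *
 * The range length must satisfy the same alignment rule as "size".
 */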

static ssize_t mapping_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	size_t to_alloc;
	struct range r;
	ssize_t rc;

	rc = range_parse(buf, len, &r);
	if (rc)
		return rc;

	rc = -ENXIO;
	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return rc;
	}
	device_lock(dev);

	to_alloc = range_len(&r);
	if (alloc_is_aligned(dev_dax, to_alloc))
		rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
	device_unlock(dev);
	device_unlock(dax_region->dev);

	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_WO(mapping);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sprintf(buf, "%d\n", dev_dax->align);
}

static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = 0; i < dev_dax->nr_range; i++) {
		size_t len = range_len(&dev_dax->ranges[i].range);

		if (!alloc_is_aligned(dev_dax, len)) {
			dev_dbg(dev, "%s: align %u invalid for range %d\n",
				__func__, dev_dax->align, i);
			return -EINVAL;
		}
	}

	return 0;
}

static ssize_t align_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	unsigned long val, align_save;
	ssize_t rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return -ENXIO;

	if (!dax_align_valid(val))
		return -EINVAL;

	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return -ENXIO;
	}

	device_lock(dev);
	if (dev->driver) {
		rc = -EBUSY;
		goto out_unlock;
	}

	align_save = dev_dax->align;
	dev_dax->align = val;
	rc = dev_dax_validate_align(dev_dax);
	if (rc)
		dev_dax->align = align_save;
out_unlock:
	device_unlock(dev);
	device_unlock(dax_region->dev);
	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(align);

static int dev_dax_target_node(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->target_node;
}

static ssize_t target_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
}
static DEVICE_ATTR_RO(target_node);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	unsigned long long start;

	if (dev_dax->nr_range < 1)
		start = dax_region->res.start;
	else
		start = dev_dax->ranges[0].range.start;

	return sprintf(buf, "%#llx\n", start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t numa_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
		return 0;
	if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
		return 0;
	if (a == &dev_attr_mapping.attr && is_static(dax_region))
		return 0;
	if ((a == &dev_attr_align.attr ||
	     a == &dev_attr_size.attr) && is_static(dax_region))
		return 0444;
	return a->mode;
}

static struct attribute *dev_dax_attributes[] = {
	&dev_attr_modalias.attr,
	&dev_attr_size.attr,
	&dev_attr_mapping.attr,
	&dev_attr_target_node.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_numa_node.attr,
	NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
	.attrs = dev_dax_attributes,
	.is_visible = dev_dax_visible,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dev_dax_attribute_group,
	NULL,
};

static void dev_dax_release(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	struct dax_device *dax_dev = dev_dax->dax_dev;

	put_dax(dax_dev);
	free_dev_dax_id(dev_dax);
	dax_region_put(dax_region);
	kfree(dev_dax->pgmap);
	kfree(dev_dax);
}

static const struct device_type dev_dax_type = {
	.release = dev_dax_release,
	.groups = dax_attribute_groups,
};

struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
{
	struct dax_region *dax_region = data->dax_region;
	struct device *parent = dax_region->dev;
	struct dax_device *dax_dev;
	struct dev_dax *dev_dax;
	struct inode *inode;
	struct device *dev;
	int rc;

	dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
	if (!dev_dax)
		return ERR_PTR(-ENOMEM);

	if (is_static(dax_region)) {
		if (dev_WARN_ONCE(parent, data->id < 0,
				"dynamic id specified to static region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		dev_dax->id = data->id;
	} else {
		if (dev_WARN_ONCE(parent, data->id >= 0,
				"static id specified to dynamic region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		rc = ida_alloc(&dax_region->ida, GFP_KERNEL);
		if (rc < 0)
			goto err_id;
		dev_dax->id = rc;
	}

	dev_dax->region = dax_region;
	dev = &dev_dax->dev;
	device_initialize(dev);
	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);

	rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
	if (rc)
		goto err_range;

	if (data->pgmap) {
		dev_WARN_ONCE(parent, !is_static(dax_region),
			"custom dev_pagemap requires a static dax_region\n");

		dev_dax->pgmap = kmemdup(data->pgmap,
				sizeof(struct dev_pagemap), GFP_KERNEL);
		if (!dev_dax->pgmap) {
			rc = -ENOMEM;
			goto err_pgmap;
		}
	}

	/*
	 * No dax_operations since there is no access to this device outside of
	 * mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		goto err_alloc_dax;
	}
	set_dax_synchronous(dax_dev);
	set_dax_nocache(dax_dev);
	set_dax_nomc(dax_dev);

	/* a device_dax instance is dead while the driver is not attached */
	kill_dax(dax_dev);

	dev_dax->dax_dev = dax_dev;
	dev_dax->target_node = dax_region->target_node;
	dev_dax->align = dax_region->align;
	ida_init(&dev_dax->ida);
	kref_get(&dax_region->kref);

	inode = dax_inode(dax_dev);
	dev->devt = inode->i_rdev;
	dev->bus = &dax_bus_type;
	dev->parent = parent;
	dev->type = &dev_dax_type;

	rc = device_add(dev);
	if (rc) {
		kill_dev_dax(dev_dax);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
	if (rc)
		return ERR_PTR(rc);

	/* register mapping device for the initial allocation range */
	if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
		rc = devm_register_dax_mapping(dev_dax, 0);
		if (rc)
			return ERR_PTR(rc);
	}

	return dev_dax;

err_alloc_dax:
	kfree(dev_dax->pgmap);
err_pgmap:
	free_dev_dax_ranges(dev_dax);
err_range:
	free_dev_dax_id(dev_dax);
err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);
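
/*
 * Illustrative sketch (assumed names, not defined in this file): a dax
 * driver registers with this bus by filling in a dax_device_driver and
 * calling the dax_driver_register() wrapper from bus.h, e.g.:
 *
 *	static struct dax_device_driver example_dax_driver = {
 *		.type = DAXDRV_DEVICE_TYPE,
 *		.probe = example_probe,
 *		.remove = example_remove,	(optional, see dax_bus_remove())
 *	};
 *	...
 *	rc = dax_driver_register(&example_dax_driver);
 */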

int __dax_driver_register(struct dax_device_driver *dax_drv,
		struct module *module, const char *mod_name)
{
	struct device_driver *drv = &dax_drv->drv;

	/*
	 * dax_bus_probe() calls dax_drv->probe() unconditionally.
	 * So better be safe than sorry and ensure it is provided.
	 */
	if (!dax_drv->probe)
		return -EINVAL;

	INIT_LIST_HEAD(&dax_drv->ids);
	drv->owner = module;
	drv->name = mod_name;
	drv->mod_name = mod_name;
	drv->bus = &dax_bus_type;

	return driver_register(drv);
}
EXPORT_SYMBOL_GPL(__dax_driver_register);

void dax_driver_unregister(struct dax_device_driver *dax_drv)
{
	struct device_driver *drv = &dax_drv->drv;
	struct dax_id *dax_id, *_id;

	mutex_lock(&dax_bus_lock);
	list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) {
		list_del(&dax_id->list);
		kfree(dax_id);
	}
	mutex_unlock(&dax_bus_lock);
	driver_unregister(drv);
}
EXPORT_SYMBOL_GPL(dax_driver_unregister);

int __init dax_bus_init(void)
{
	return bus_register(&dax_bus_type);
}

void __exit dax_bus_exit(void)
{
	bus_unregister(&dax_bus_type);
}