// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"

/**
 * struct dax_device - anchor object for dax services
 * @list: node in the dax_host_list hash used by dax_get_by_host()
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: operations vector supplied by the registering dax driver
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
static struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
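/*
 * Worked example for bdev_dax_pgoff() below (illustrative values only):
 * with 4K pages, a partition starting at sector 2048 and a relative
 * sector of 256, the physical offset is (2048 + 256) * 512 = 1179648
 * bytes, so *pgoff = 288. Offsets or sizes that are not page aligned
 * are rejected with -EINVAL.
 */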
int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	sector_t start_sect = bdev ? get_start_sect(bdev) : 0;
	phys_addr_t phys_off = (start_sect + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_disk->queue))
		return NULL;
	return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);

bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	bool dax_enabled = false;
	pgoff_t pgoff, pgoff_end;
	void *kaddr, *end_kaddr;
	pfn_t pfn, end_pfn;
	sector_t last_page;
	long len, len2;
	int err, id;

	if (blocksize != PAGE_SIZE) {
		pr_info("%pg: error: unsupported blocksize for dax\n", bdev);
		return false;
	}

	if (!dax_dev) {
		pr_debug("%pg: error: dax unsupported by block device\n", bdev);
		return false;
	}

	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
	if (err) {
		pr_info("%pg: error: unaligned partition for dax\n", bdev);
		return false;
	}

	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
	if (err) {
		pr_info("%pg: error: unaligned partition for dax\n", bdev);
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);

	if (len < 1 || len2 < 1) {
		pr_info("%pg: error: dax access failed (%ld)\n",
				bdev, len < 1 ? len : len2);
		dax_read_unlock(id);
		return false;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
		dax_enabled = true;
	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
		struct dev_pagemap *pgmap, *end_pgmap;

		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
				&& pfn_t_to_page(pfn)->pgmap == pgmap
				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
			dax_enabled = true;
		put_dev_pagemap(pgmap);
		put_dev_pagemap(end_pgmap);
	}
	dax_read_unlock(id);

	if (!dax_enabled) {
		pr_info("%pg: error: dax support not enabled\n", bdev);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(generic_fsdax_supported);
#endif /* CONFIG_FS_DAX */
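/*
 * Typical mount-time usage (illustrative sketch; error handling and the
 * exact call sites vary per filesystem):
 *
 *	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
 *
 *	if (!dax_dev || !bdev_dax_supported(sb->s_bdev, sb->s_blocksize))
 *		return -EINVAL;		(fall back to non-dax I/O)
 */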
/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	struct dax_device *dax_dev;
	struct request_queue *q;
	char buf[BDEVNAME_SIZE];
	bool ret;

	q = bdev_get_queue(bdev);
	if (!q || !blk_queue_dax(q)) {
		pr_debug("%s: error: request queue doesn't support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("%s: error: device does not support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	ret = dax_supported(dax_dev, bdev, blocksize, 0,
			i_size_read(bdev->bd_inode) / 512);

	put_dax(dax_dev);

	return ret;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
	/* flag to check if device supports synchronous flush */
	DAXDEV_SYNC,
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else
		dax_write_cache(dax_dev, write_cache);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return -EINVAL;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
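/*
 * Typical dax_direct_access() usage, showing the required srcu
 * protection (illustrative; "buf" and the memcpy() consumer are
 * hypothetical):
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long nr;
 *	int id;
 *
 *	id = dax_read_lock();
 *	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	if (nr < 1) {
 *		dax_read_unlock(id);
 *		return nr < 0 ? nr : -EIO;
 *	}
 *	memcpy(buf, kaddr, PAGE_SIZE);
 *	dax_read_unlock(id);
 */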
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len)
{
	bool ret = false;
	int id;

	if (!dax_dev)
		return false;

	id = dax_read_lock();
	if (dax_alive(dax_dev) && dax_dev->ops->dax_supported)
		ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
				start, len);
	dax_read_unlock(id);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_supported);

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);

int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages)
{
	if (!dax_alive(dax_dev))
		return -ENXIO;
	/*
	 * There are no callers that want to zero more than one page as of now.
	 * Once users are there, this check can be removed after the
	 * device mapper code has been updated to split ranges across targets.
	 */
	if (nr_pages != 1)
		return -EIO;

	return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
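/*
 * Illustrative caller of dax_zero_page_range() (hypothetical helper;
 * note the nr_pages == 1 limit enforced above):
 *
 *	int id = dax_read_lock();
 *	int rc = dax_zero_page_range(dax_dev, pgoff, 1);
 *
 *	dax_read_unlock(id);
 *	return rc;
 */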
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool __dax_synchronous(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__dax_synchronous);

void __set_dax_synchronous(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__set_dax_synchronous);

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);
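/*
 * Illustrative teardown order for a registering driver (sketch only):
 *
 *	kill_dax(dax_dev);	shut off new operations, wait for srcu
 *	put_dax(dax_dev);	drop the final inode reference
 *
 * kill_dax() must run before the final put so that in-flight
 * dax_direct_access() users have drained; see the note below.
 */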
/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed. Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);

void run_dax(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_free_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, iminor(inode));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.free_inode = dax_free_inode,
	.drop_inode = generic_delete_inode,
};

static int dax_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	ctx->ops = &dax_sops;
	return 0;
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.init_fs_context = dax_init_fs_context,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);

	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}
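/*
 * Illustrative registration by a driver with direct-access capable
 * media ("my_dax_ops" and "drv_data" are hypothetical; error handling
 * omitted). The ops table must at least provide ->zero_page_range(),
 * see the check in alloc_dax() below:
 *
 *	dax_dev = alloc_dax(drv_data, disk->disk_name, &my_dax_ops,
 *			is_sync ? DAXDEV_F_SYNC : 0);
 *	if (IS_ERR(dax_dev))
 *		return PTR_ERR(dax_dev);
 */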
struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops, unsigned long flags)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	if (ops && !ops->zero_page_range) {
		pr_debug("%s: error: device does not provide dax"
			 " operation zero_page_range()\n",
			 __host ? __host : "Unknown");
		return ERR_PTR(-EINVAL);
	}

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return ERR_PTR(-ENOMEM);

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	if (flags & DAXDEV_F_SYNC)
		set_dax_synchronous(dax_dev);

	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * inode_dax(): convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
		return NULL;
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_core_init(void)
{
	int rc;

	rc = dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		goto err_chrdev;

	rc = dax_bus_init();
	if (rc)
		goto err_bus;
	return 0;

err_bus:
	unregister_chrdev_region(dax_devt, MINORMASK+1);
err_chrdev:
	dax_fs_exit();
	return rc;
}

static void __exit dax_core_exit(void)
{
	dax_bus_exit();
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);