// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
        return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
        srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
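/*
 * Illustrative usage sketch, not part of the original file: callers are
 * expected to bracket dax operations with dax_read_lock()/dax_read_unlock()
 * so that kill_dax() can wait out in-flight users via synchronize_srcu().
 * Modelled on __bdev_dax_supported() below; example_read_one_page() is a
 * made-up helper name.
 *
 *      static long example_read_one_page(struct dax_device *dax_dev,
 *                      pgoff_t pgoff, void **kaddr, pfn_t *pfn)
 *      {
 *              long rc;
 *              int id;
 *
 *              id = dax_read_lock();
 *              rc = dax_direct_access(dax_dev, pgoff, 1, kaddr, pfn);
 *              dax_read_unlock(id);
 *              return rc;
 *      }
 */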
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
                pgoff_t *pgoff)
{
        phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

        if (pgoff)
                *pgoff = PHYS_PFN(phys_off);
        if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
        if (!blk_queue_dax(bdev->bd_disk->queue))
                return NULL;
        return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif

bool __generic_fsdax_supported(struct dax_device *dax_dev,
                struct block_device *bdev, int blocksize, sector_t start,
                sector_t sectors)
{
        bool dax_enabled = false;
        pgoff_t pgoff, pgoff_end;
        char buf[BDEVNAME_SIZE];
        void *kaddr, *end_kaddr;
        pfn_t pfn, end_pfn;
        sector_t last_page;
        long len, len2;
        int err, id;

        if (blocksize != PAGE_SIZE) {
                pr_info("%s: error: unsupported blocksize for dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
        if (err) {
                pr_info("%s: error: unaligned partition for dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
        err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
        if (err) {
                pr_info("%s: error: unaligned partition for dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        id = dax_read_lock();
        len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
        len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);

        if (len < 1 || len2 < 1) {
                pr_info("%s: error: dax access failed (%ld)\n",
                                bdevname(bdev, buf), len < 1 ? len : len2);
                dax_read_unlock(id);
                return false;
        }

        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
                /*
                 * An arch that has enabled the pmem api should also
                 * have its drivers support pfn_t_devmap()
                 *
                 * This is a developer warning and should not trigger in
                 * production. dax_flush() will crash since it depends
                 * on being able to do (page_address(pfn_to_page())).
                 */
                WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
                dax_enabled = true;
        } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
                struct dev_pagemap *pgmap, *end_pgmap;

                pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
                end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
                if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
                                && pfn_t_to_page(pfn)->pgmap == pgmap
                                && pfn_t_to_page(end_pfn)->pgmap == pgmap
                                && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
                                && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
                        dax_enabled = true;
                put_dev_pagemap(pgmap);
                put_dev_pagemap(end_pgmap);

        }
        dax_read_unlock(id);

        if (!dax_enabled) {
                pr_info("%s: error: dax support not enabled\n",
                                bdevname(bdev, buf));
                return false;
        }
        return true;
}
EXPORT_SYMBOL_GPL(__generic_fsdax_supported);

/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
        struct dax_device *dax_dev;
        struct request_queue *q;
        char buf[BDEVNAME_SIZE];
        bool ret;
        int id;

        q = bdev_get_queue(bdev);
        if (!q || !blk_queue_dax(q)) {
                pr_debug("%s: error: request queue doesn't support dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
        if (!dax_dev) {
                pr_debug("%s: error: device does not support dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        id = dax_read_lock();
        ret = dax_supported(dax_dev, bdev, blocksize, 0,
                        i_size_read(bdev->bd_inode) / 512);
        dax_read_unlock(id);

        put_dax(dax_dev);

        return ret;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif
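/*
 * Illustrative sketch, not part of the original file: a filesystem that
 * honors a "dax" mount option would typically gate it on the
 * bdev_dax_supported() wrapper from <linux/dax.h>, which lands in
 * __bdev_dax_supported() above.  "wants_dax", "sb" and the error handling
 * are placeholders for the caller's context.
 *
 *      if (wants_dax && !bdev_dax_supported(sb->s_bdev, sb->s_blocksize))
 *              return -EINVAL;
 */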
enum dax_device_flags {
        /* !alive + rcu grace period == no new operations / mappings */
        DAXDEV_ALIVE,
        /* gate whether dax_flush() calls the low level flush routine */
        DAXDEV_WRITE_CACHE,
        /* flag to check if device supports synchronous flush */
        DAXDEV_SYNC,
};

/**
 * struct dax_device - anchor object for dax services
 * @list: node for the dax_host_list hash used by dax_get_by_host()
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: operations supplied by the publishing driver via alloc_dax()
 */
struct dax_device {
        struct hlist_node list;
        struct inode inode;
        struct cdev cdev;
        const char *host;
        void *private;
        unsigned long flags;
        const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
        ssize_t rc;

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return -ENXIO;

        rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
        put_dax(dax_dev);
        return rc;
}

static ssize_t write_cache_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool write_cache;
        int rc = strtobool(buf, &write_cache);
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return -ENXIO;

        if (rc)
                len = rc;
        else
                dax_write_cache(dax_dev, write_cache);

        put_dax(dax_dev);
        return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
        if (a == &dev_attr_write_cache.attr)
                return 0;
#endif
        return a->mode;
}

static struct attribute *dax_attributes[] = {
        &dev_attr_write_cache.attr,
        NULL,
};

struct attribute_group dax_attribute_group = {
        .name = "dax",
        .attrs = dax_attributes,
        .is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative to @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
                void **kaddr, pfn_t *pfn)
{
        long avail;

        if (!dax_dev)
                return -EOPNOTSUPP;

        if (!dax_alive(dax_dev))
                return -ENXIO;

        if (nr_pages < 0)
                return nr_pages;

        avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
                        kaddr, pfn);
        if (!avail)
                return -ERANGE;
        return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
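/*
 * Illustrative sketch, not part of the original file: because the result is
 * capped at min(avail, nr_pages), a caller asking for a large range must be
 * prepared to receive fewer pages than requested and iterate.  The names
 * below are made up.
 *
 *      long mapped = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, &pfn);
 *
 *      if (mapped < 0)
 *              return mapped;
 *      // only "mapped" pages starting at kaddr are valid; advance pgoff
 *      // by "mapped" and call again for the remainder of the range.
 */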
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
                int blocksize, sector_t start, sector_t len)
{
        if (!dax_alive(dax_dev))
                return false;

        return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
}

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i)
{
        if (!dax_alive(dax_dev))
                return 0;

        return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i)
{
        if (!dax_alive(dax_dev))
                return 0;

        return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);

int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
                        size_t nr_pages)
{
        if (!dax_alive(dax_dev))
                return -ENXIO;
        /*
         * There are no callers that want to zero more than one page as of now.
         * Once users are there, this check can be removed after the
         * device mapper code has been updated to split ranges across targets.
         */
        if (nr_pages != 1)
                return -EIO;

        return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
        if (unlikely(!dax_write_cache_enabled(dax_dev)))
                return;

        arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
        if (wc)
                set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
        else
                clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
        return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
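/*
 * Illustrative sketch, not part of the original file: dax_flush() above is a
 * nop unless CONFIG_ARCH_HAS_PMEM_API is set and the driver has opted in via
 * dax_write_cache(), i.e. when CPU caches still need explicit writeback for
 * the device's data to be durable.  "cache_flush_required" is a made-up
 * stand-in for a driver-specific capability check.
 *
 *      dax_write_cache(dax_dev, cache_flush_required);
 *      ...
 *      dax_flush(dax_dev, kaddr, size);        // writeback only if enabled
 */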
bool __dax_synchronous(struct dax_device *dax_dev)
{
        return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__dax_synchronous);

void __set_dax_synchronous(struct dax_device *dax_dev)
{
        set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__set_dax_synchronous);

bool dax_alive(struct dax_device *dax_dev)
{
        lockdep_assert_held(&dax_srcu);
        return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
        return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed.  Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
        if (!dax_dev)
                return;

        clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

        synchronize_srcu(&dax_srcu);

        spin_lock(&dax_host_lock);
        hlist_del_init(&dax_dev->list);
        spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);

void run_dax(struct dax_device *dax_dev)
{
        set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
        struct dax_device *dax_dev;
        struct inode *inode;

        dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
        if (!dax_dev)
                return NULL;

        inode = &dax_dev->inode;
        inode->i_rdev = 0;
        return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
        return container_of(inode, struct dax_device, inode);
}

static void dax_free_inode(struct inode *inode)
{
        struct dax_device *dax_dev = to_dax_dev(inode);
        kfree(dax_dev->host);
        dax_dev->host = NULL;
        if (inode->i_rdev)
                ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
        kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
        struct dax_device *dax_dev = to_dax_dev(inode);
        WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
                        "kill_dax() must be called before final iput()\n");
}

static const struct super_operations dax_sops = {
        .statfs = simple_statfs,
        .alloc_inode = dax_alloc_inode,
        .destroy_inode = dax_destroy_inode,
        .free_inode = dax_free_inode,
        .drop_inode = generic_delete_inode,
};

static int dax_init_fs_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);

        if (!ctx)
                return -ENOMEM;
        ctx->ops = &dax_sops;
        return 0;
}

static struct file_system_type dax_fs_type = {
        .name = "dax",
        .init_fs_context = dax_init_fs_context,
        .kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
        dev_t devt = *(dev_t *) data;

        return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
        dev_t devt = *(dev_t *) data;

        inode->i_rdev = devt;
        return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
        struct dax_device *dax_dev;
        struct inode *inode;

        inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
                        dax_test, dax_set, &devt);

        if (!inode)
                return NULL;

        dax_dev = to_dax_dev(inode);
        if (inode->i_state & I_NEW) {
                set_bit(DAXDEV_ALIVE, &dax_dev->flags);
                inode->i_cdev = &dax_dev->cdev;
                inode->i_mode = S_IFCHR;
                inode->i_flags = S_DAX;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                unlock_new_inode(inode);
        }

        return dax_dev;
}
static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
        int hash;

        /*
         * Unconditionally init dax_dev since it's coming from a
         * non-zeroed slab cache
         */
        INIT_HLIST_NODE(&dax_dev->list);
        dax_dev->host = host;
        if (!host)
                return;

        hash = dax_host_hash(host);
        spin_lock(&dax_host_lock);
        hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
        spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
                const struct dax_operations *ops, unsigned long flags)
{
        struct dax_device *dax_dev;
        const char *host;
        dev_t devt;
        int minor;

        if (ops && !ops->zero_page_range) {
                pr_debug("%s: error: device does not provide dax"
                         " operation zero_page_range()\n",
                         __host ? __host : "Unknown");
                return ERR_PTR(-EINVAL);
        }

        host = kstrdup(__host, GFP_KERNEL);
        if (__host && !host)
                return ERR_PTR(-ENOMEM);

        minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
        if (minor < 0)
                goto err_minor;

        devt = MKDEV(MAJOR(dax_devt), minor);
        dax_dev = dax_dev_get(devt);
        if (!dax_dev)
                goto err_dev;

        dax_add_host(dax_dev, host);
        dax_dev->ops = ops;
        dax_dev->private = private;
        if (flags & DAXDEV_F_SYNC)
                set_dax_synchronous(dax_dev);

        return dax_dev;

 err_dev:
        ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
        kfree(host);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);

void put_dax(struct dax_device *dax_dev)
{
        if (!dax_dev)
                return;
        iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);
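/*
 * Illustrative sketch, not part of the original file: the expected provider
 * life cycle is alloc_dax() at bind time and kill_dax() followed by
 * put_dax() at unbind, so that in-flight operations drain before the final
 * iput() (see the WARN in dax_destroy_inode()).  "my_private", "my_ops" and
 * "disk" are placeholders; pass DAXDEV_F_SYNC instead of 0 only when the
 * backing media is synchronous.
 *
 *      dax_dev = alloc_dax(my_private, disk->disk_name, &my_ops, 0);
 *      if (IS_ERR(dax_dev))
 *              return PTR_ERR(dax_dev);
 *      ...
 *      kill_dax(dax_dev);
 *      put_dax(dax_dev);
 */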
/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
        struct dax_device *dax_dev, *found = NULL;
        int hash, id;

        if (!host)
                return NULL;

        hash = dax_host_hash(host);

        id = dax_read_lock();
        spin_lock(&dax_host_lock);
        hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
                if (!dax_alive(dax_dev)
                                || strcmp(host, dax_dev->host) != 0)
                        continue;

                if (igrab(&dax_dev->inode))
                        found = dax_dev;
                break;
        }
        spin_unlock(&dax_host_lock);
        dax_read_unlock(id);

        return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);

/**
 * inode_dax() - convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
        struct cdev *cdev = inode->i_cdev;

        return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
        return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
        if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
                return NULL;
        return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
        struct dax_device *dax_dev = _dax_dev;
        struct inode *inode = &dax_dev->inode;

        memset(dax_dev, 0, sizeof(*dax_dev));
        inode_init_once(inode);
}

static int dax_fs_init(void)
{
        int rc;

        dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
                        (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                         SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                        init_once);
        if (!dax_cache)
                return -ENOMEM;

        dax_mnt = kern_mount(&dax_fs_type);
        if (IS_ERR(dax_mnt)) {
                rc = PTR_ERR(dax_mnt);
                goto err_mount;
        }
        dax_superblock = dax_mnt->mnt_sb;

        return 0;

 err_mount:
        kmem_cache_destroy(dax_cache);

        return rc;
}

static void dax_fs_exit(void)
{
        kern_unmount(dax_mnt);
        kmem_cache_destroy(dax_cache);
}

static int __init dax_core_init(void)
{
        int rc;

        rc = dax_fs_init();
        if (rc)
                return rc;

        rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
        if (rc)
                goto err_chrdev;

        rc = dax_bus_init();
        if (rc)
                goto err_bus;
        return 0;

err_bus:
        unregister_chrdev_region(dax_devt, MINORMASK+1);
err_chrdev:
        dax_fs_exit();
        return rc;
}

static void __exit dax_core_exit(void)
{
        unregister_chrdev_region(dax_devt, MINORMASK+1);
        ida_destroy(&dax_minor_ida);
        dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);