// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"

/**
 * struct dax_device - anchor object for dax services
 * @list: hash list node for dax_host_list lookups
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: operations supplied by the dax driver
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
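/*
 * Illustrative usage sketch (not part of this file): callers bracket any
 * use of a dax_device with dax_read_lock()/dax_read_unlock() and re-check
 * dax_alive() inside the critical section, e.g.:
 *
 *	int id = dax_read_lock();
 *
 *	if (dax_alive(dax_dev))
 *		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	dax_read_unlock(id);
 */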
static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	sector_t start_sect = bdev ? get_start_sect(bdev) : 0;
	phys_addr_t phys_off = (start_sect + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);
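/*
 * Worked example (illustrative, not part of this file): with 512-byte
 * sectors and 4K pages, a partition starting at sector 2048 and a
 * relative sector of 8 gives phys_off = (2048 + 8) * 512 = 1052672, so
 * *pgoff = PHYS_PFN(1052672) = 257. Note *pgoff is written before the
 * alignment check; if phys_off or size is not page-aligned the
 * translation is still rejected with -EINVAL.
 */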
/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
static struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_disk->queue))
		return NULL;
	return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);

bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	bool dax_enabled = false;
	pgoff_t pgoff, pgoff_end;
	void *kaddr, *end_kaddr;
	pfn_t pfn, end_pfn;
	sector_t last_page;
	long len, len2;
	int err, id;

	if (blocksize != PAGE_SIZE) {
		pr_info("%pg: error: unsupported blocksize for dax\n", bdev);
		return false;
	}

	if (!dax_dev) {
		pr_debug("%pg: error: dax unsupported by block device\n", bdev);
		return false;
	}

	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
	if (err) {
		pr_info("%pg: error: unaligned partition for dax\n", bdev);
		return false;
	}

	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
	if (err) {
		pr_info("%pg: error: unaligned partition for dax\n", bdev);
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);

	if (len < 1 || len2 < 1) {
		pr_info("%pg: error: dax access failed (%ld)\n",
				bdev, len < 1 ? len : len2);
		dax_read_unlock(id);
		return false;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
		dax_enabled = true;
	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
		struct dev_pagemap *pgmap, *end_pgmap;

		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
				&& pfn_t_to_page(pfn)->pgmap == pgmap
				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
			dax_enabled = true;
		put_dev_pagemap(pgmap);
		put_dev_pagemap(end_pgmap);
	}
	dax_read_unlock(id);

	if (!dax_enabled) {
		pr_info("%pg: error: dax support not enabled\n", bdev);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(generic_fsdax_supported);

bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len)
{
	bool ret = false;
	int id;

	if (!dax_dev)
		return false;

	id = dax_read_lock();
	if (dax_alive(dax_dev) && dax_dev->ops->dax_supported)
		ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
				start, len);
	dax_read_unlock(id);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_supported);
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
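/*
 * Illustrative mount-time flow (a sketch, not part of this file; the
 * bdev_nr_sectors() helper for the device length is an assumption of
 * the example): a filesystem resolves the dax_device for its block
 * device and validates the range before honoring "-o dax":
 *
 *	dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
 *	if (!dax_supported(dax_dev, sb->s_bdev, sb->s_blocksize, 0,
 *			bdev_nr_sectors(sb->s_bdev)))
 *		... fall back to non-dax I/O ...
 */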
enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
	/* flag to check if device supports synchronous flush */
	DAXDEV_SYNC,
};

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return -EINVAL;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
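/*
 * Illustrative caller pattern (a sketch, not part of this file): walk a
 * range in chunks, advancing by however many contiguous pages the device
 * reports at each offset:
 *
 *	while (nr_pages) {
 *		long rc = dax_direct_access(dax_dev, pgoff, nr_pages,
 *				&kaddr, &pfn);
 *		if (rc < 0)
 *			return rc;
 *		... use rc pages starting at kaddr ...
 *		pgoff += rc;
 *		nr_pages -= rc;
 *	}
 */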
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);

int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages)
{
	if (!dax_alive(dax_dev))
		return -ENXIO;
	/*
	 * There are no callers that want to zero more than one page as of now.
	 * Once users are there, this check can be removed after the
	 * device mapper code has been updated to split ranges across targets.
	 */
	if (nr_pages != 1)
		return -EIO;

	return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool __dax_synchronous(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__dax_synchronous);

void __set_dax_synchronous(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__set_dax_synchronous);

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed. Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);

void run_dax(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);
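/*
 * Illustrative teardown ordering (a sketch, not part of this file): a
 * driver unbinding its device first severs new operations with
 * kill_dax(), then drops its reference:
 *
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 *
 * kill_dax() guarantees, via synchronize_srcu(), that no in-flight
 * operation still observes the device as alive before put_dax() drops
 * what may be the final reference.
 */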
static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_free_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, iminor(inode));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.free_inode = dax_free_inode,
	.drop_inode = generic_delete_inode,
};

static int dax_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	ctx->ops = &dax_sops;
	return 0;
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.init_fs_context = dax_init_fs_context,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);
	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops, unsigned long flags)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	if (ops && !ops->zero_page_range) {
		pr_debug("%s: error: device does not provide dax"
			 " operation zero_page_range()\n",
			 __host ? __host : "Unknown");
		return ERR_PTR(-EINVAL);
	}

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return ERR_PTR(-ENOMEM);

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	if (flags & DAXDEV_F_SYNC)
		set_dax_synchronous(dax_dev);

	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * inode_dax() - convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
		return NULL;
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_core_init(void)
{
	int rc;

	rc = dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		goto err_chrdev;

	rc = dax_bus_init();
	if (rc)
		goto err_bus;
	return 0;

 err_bus:
	unregister_chrdev_region(dax_devt, MINORMASK+1);
 err_chrdev:
	dax_fs_exit();
	return rc;
}

static void __exit dax_core_exit(void)
{
	dax_bus_exit();
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);