/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_queue))
		return NULL;
	return fs_dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif
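
/*
 * Illustrative sketch (not part of this file): callers bracket dax
 * operations in dax_read_lock()/dax_read_unlock() so that kill_dax()
 * can drain in-flight accesses with synchronize_srcu(). The locals
 * below are hypothetical:
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	int id;
 *	long rc;
 *
 *	id = dax_read_lock();
 *	rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	dax_read_unlock(id);
 *
 * __bdev_dax_supported() below follows exactly this pattern.
 */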

/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with the dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	struct dax_device *dax_dev;
	bool dax_enabled = false;
	pgoff_t pgoff, pgoff_end;
	struct request_queue *q;
	char buf[BDEVNAME_SIZE];
	void *kaddr, *end_kaddr;
	pfn_t pfn, end_pfn;
	sector_t last_page;
	long len, len2;
	int err, id;

	if (blocksize != PAGE_SIZE) {
		pr_debug("%s: error: unsupported blocksize for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	q = bdev_get_queue(bdev);
	if (!q || !blk_queue_dax(q)) {
		pr_debug("%s: error: request queue doesn't support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
	if (err) {
		pr_debug("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	/* convert the last page index to a sector offset (8 sectors per 4K page) */
	last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
	if (err) {
		pr_debug("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("%s: error: device does not support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
	dax_read_unlock(id);

	put_dax(dax_dev);

	if (len < 1 || len2 < 1) {
		pr_debug("%s: error: dax access failed (%ld)\n",
				bdevname(bdev, buf), len < 1 ? len : len2);
		return false;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
		dax_enabled = true;
	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
		struct dev_pagemap *pgmap, *end_pgmap;

		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
				&& pfn_t_to_page(pfn)->pgmap == pgmap
				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
			dax_enabled = true;
		put_dev_pagemap(pgmap);
		put_dev_pagemap(end_pgmap);
	}

	if (!dax_enabled) {
		pr_debug("%s: error: dax support not enabled\n",
				bdevname(bdev, buf));
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif
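
/*
 * Illustrative sketch (not part of this file): filesystems typically
 * invoke this check at mount time through the bdev_dax_supported()
 * wrapper when the user asked for dax. "sbi" and "OPT_DAX" are
 * hypothetical stand-ins for a filesystem's mount-option state:
 *
 *	if (sbi->options & OPT_DAX) {
 *		if (!bdev_dax_supported(sb->s_bdev, sb->s_blocksize))
 *			return -EINVAL;
 *	}
 */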

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
};

/**
 * struct dax_device - anchor object for dax services
 * @list: entry in the global dax_host_list hash, keyed by @host
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: operations vector provided by the dax driver
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else
		dax_write_cache(dax_dev, write_cache);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);
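
/*
 * Illustrative sketch (not part of this file): a driver that registers
 * a dax_device against a named struct device can expose this group on
 * that device, e.g.:
 *
 *	if (sysfs_create_group(&dev->kobj, &dax_attribute_group))
 *		dev_warn(dev, "failed to create dax attributes\n");
 *
 * The attribute then appears as .../<device>/dax/write_cache, and
 * dax_visible() hides it when CONFIG_ARCH_HAS_PMEM_API is disabled.
 */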

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return nr_pages;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
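
/*
 * Illustrative sketch (not part of this file): after writing through a
 * kaddr obtained from dax_direct_access(), a caller that needs the data
 * to be durable on pmem flushes the cpu cache explicitly:
 *
 *	memcpy(kaddr, src, len);
 *	dax_flush(dax_dev, kaddr, len);
 *
 * Per the definitions above, dax_flush() is a nop when DAXDEV_WRITE_CACHE
 * is clear or when the arch lacks CONFIG_ARCH_HAS_PMEM_API.
 */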

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note: rcu is not protecting the liveness of dax_dev; rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive() have completed. Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);

	dax_dev->private = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);

	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}
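
/*
 * Illustrative sketch (not part of this file): a driver supplies a
 * dax_operations table when it registers with alloc_dax() below. The
 * foo_* names are hypothetical:
 *
 *	static const struct dax_operations foo_dax_ops = {
 *		.direct_access = foo_dax_direct_access,
 *		.copy_from_iter = foo_dax_copy_from_iter,
 *		.copy_to_iter = foo_dax_copy_to_iter,
 *	};
 *
 *	dax_dev = alloc_dax(foo, foo->disk_name, &foo_dax_ops);
 */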

struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return NULL;

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax);

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);

/**
 * inode_dax() - convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);
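
/*
 * Illustrative sketch (not part of this file): a "device dax" style
 * driver recovers its context from the helpers above in its cdev open
 * path. The foo_* names are hypothetical:
 *
 *	static int foo_open(struct inode *inode, struct file *filp)
 *	{
 *		struct dax_device *dax_dev = inode_dax(inode);
 *		struct foo *foo = dax_get_private(dax_dev);
 *
 *		filp->f_mapping = dax_inode(dax_dev)->i_mapping;
 *		filp->private_data = foo;
 *		return 0;
 *	}
 */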

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int __dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_fs_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_fs_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void __dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_fs_type);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_fs_init(void)
{
	int rc;

	rc = __dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		__dax_fs_exit();
	return rc;
}

static void __exit dax_fs_exit(void)
{
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	__dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_fs_init);
module_exit(dax_fs_exit);