/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_queue))
		return NULL;
	return fs_dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif
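/*
 * Usage sketch (illustrative only, not compiled here): a filesystem
 * that holds both a block_device and a dax_device typically pairs
 * bdev_dax_pgoff() with dax_direct_access() to turn a sector into a
 * kernel mapping, as __bdev_dax_supported() below does for its probe.
 * The 'bdev', 'dax_dev', and 'sector' variables are assumed to be
 * supplied by the caller:
 *
 *	pgoff_t pgoff;
 *	void *kaddr;
 *	pfn_t pfn;
 *	long nr;
 *	int id, rc;
 *
 *	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
 *	if (rc)
 *		return rc;
 *	id = dax_read_lock();
 *	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	dax_read_unlock(id);
 *	if (nr < 1)
 *		return nr < 0 ? nr : -EIO;
 */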
/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @sb: The superblock of the device
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: negative errno if unsupported, 0 if supported.
 */
int __bdev_dax_supported(struct super_block *sb, int blocksize)
{
	struct block_device *bdev = sb->s_bdev;
	struct dax_device *dax_dev;
	pgoff_t pgoff;
	int err, id;
	void *kaddr;
	pfn_t pfn;
	long len;

	if (blocksize != PAGE_SIZE) {
		pr_debug("VFS (%s): error: unsupported blocksize for dax\n",
				sb->s_id);
		return -EINVAL;
	}

	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
	if (err) {
		pr_debug("VFS (%s): error: unaligned partition for dax\n",
				sb->s_id);
		return err;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("VFS (%s): error: device does not support dax\n",
				sb->s_id);
		return -EOPNOTSUPP;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	dax_read_unlock(id);

	put_dax(dax_dev);

	if (len < 1) {
		pr_debug("VFS (%s): error: dax access failed (%ld)\n",
				sb->s_id, len);
		return len < 0 ? len : -EIO;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
	} else if (pfn_t_devmap(pfn)) {
		/* pass */;
	} else {
		pr_debug("VFS (%s): error: dax support not enabled\n",
				sb->s_id);
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
};

/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!test_bit(DAXDEV_WRITE_CACHE,
				&dax_dev->flags));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else if (write_cache)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);
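/*
 * The "dax/write_cache" attribute exposes DAXDEV_WRITE_CACHE to
 * userspace for providers that attach dax_attribute_group. A sketch
 * of the expected interaction from a shell (the device path is
 * hypothetical and driver-specific):
 *
 *	# cat /sys/bus/nd/devices/<pmem-device>/dax/write_cache
 *	1
 *	# echo 0 > /sys/bus/nd/devices/<pmem-device>/dax/write_cache
 *
 * write_cache_store() parses the input with strtobool(), so "0"/"1"
 * and "y"/"n" style spellings are accepted.
 */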
/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return nr_pages;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}
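/*
 * Durability sketch (illustrative; 'dax_dev', 'kaddr', 'src', and
 * 'size' are assumed to come from the caller, with 'kaddr' obtained
 * via a prior dax_direct_access()): a consumer that writes through a
 * dax mapping calls dax_flush() to write back the touched CPU cache
 * lines. When DAXDEV_WRITE_CACHE is clear, the media is treated as
 * write-through and the call is a nop:
 *
 *	memcpy(kaddr, src, size);
 *	dax_flush(dax_dev, kaddr, size);
 */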
/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed. Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);

	dax_dev->private = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);

	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}
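/*
 * Provider lifecycle sketch (illustrative; 'my_state', 'my_ops', and
 * 'disk' stand in for a real driver's private data, dax_operations,
 * and gendisk). alloc_dax() hands back a referenced, live device; on
 * teardown, kill_dax() must run before the private data is freed, per
 * the WARN_ONCE in dax_destroy_inode(), and put_dax() then drops the
 * reference so the backing inode can be released:
 *
 *	struct dax_device *dax_dev;
 *
 *	dax_dev = alloc_dax(my_state, disk->disk_name, &my_ops);
 *	if (!dax_dev)
 *		return -ENOMEM;
 *	...
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 */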
struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return NULL;

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax);

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);

/**
 * inode_dax() - convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int __dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_fs_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_fs_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void __dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_fs_type);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_fs_init(void)
{
	int rc;

	rc = __dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		__dax_fs_exit();
	return rc;
}
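/*
 * Consumer sketch for inode_dax() (illustrative; 'example_open' is a
 * hypothetical file_operations ->open() for a character device
 * registered via the embedded &dax_dev->cdev, as "device dax" does):
 *
 *	static int example_open(struct inode *inode, struct file *filp)
 *	{
 *		struct dax_device *dax_dev = inode_dax(inode);
 *
 *		filp->private_data = dax_get_private(dax_dev);
 *		return 0;
 *	}
 */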
static void __exit dax_fs_exit(void)
{
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	__dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_fs_init);
module_exit(dax_fs_exit);