// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"

/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: operations for this device
 */
struct dax_device {
	struct inode inode;
	struct cdev cdev;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
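/*
 * Illustrative sketch (not part of the original file): callers are expected
 * to bracket dax_device operations with dax_read_lock()/dax_read_unlock()
 * and to re-check dax_alive() inside the critical section, so that
 * kill_dax() can use synchronize_srcu() to wait out in-flight users:
 *
 *	int id = dax_read_lock();
 *
 *	if (dax_alive(dax_dev))
 *		do_dax_work(dax_dev);	// do_dax_work() is a hypothetical helper
 *	dax_read_unlock(id);
 */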
#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
#include <linux/blkdev.h>

static DEFINE_XARRAY(dax_hosts);

int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
	return xa_insert(&dax_hosts, (unsigned long)disk, dax_dev, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(dax_add_host);

void dax_remove_host(struct gendisk *disk)
{
	xa_erase(&dax_hosts, (unsigned long)disk);
}
EXPORT_SYMBOL_GPL(dax_remove_host);

/**
 * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
 * @bdev: block device to find a dax_device for
 * @start_off: returns the byte offset into the dax_device that @bdev starts
 */
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
{
	struct dax_device *dax_dev;
	u64 part_size;
	int id;

	if (!blk_queue_dax(bdev->bd_disk->queue))
		return NULL;

	*start_off = get_start_sect(bdev) * SECTOR_SIZE;
	part_size = bdev_nr_sectors(bdev) * SECTOR_SIZE;
	if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) {
		pr_info("%pg: error: unaligned partition for dax\n", bdev);
		return NULL;
	}

	id = dax_read_lock();
	dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk);
	if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode))
		dax_dev = NULL;
	dax_read_unlock(id);

	return dax_dev;
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
	/* flag to check if device supports synchronous flush */
	DAXDEV_SYNC,
	/* do not leave the caches dirty after writes */
	DAXDEV_NOCACHE,
	/* handle CPU fetch exceptions during reads */
	DAXDEV_NOMC,
};

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @mode: indicator on normal access or recovery write
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative to @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return -EINVAL;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			mode, kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
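/*
 * Illustrative sketch (not part of the original file): since the return
 * value is clamped to min(avail, nr_pages), a caller may be handed fewer
 * pages than requested and must be prepared to continue at a later @pgoff.
 * A minimal single-shot caller, assuming @src spans at least @len pages:
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long len;
 *	int id;
 *
 *	id = dax_read_lock();
 *	len = dax_direct_access(dax_dev, pgoff, nr, DAX_ACCESS, &kaddr, &pfn);
 *	if (len > 0)
 *		memcpy_flushcache(kaddr, src, len * PAGE_SIZE);
 *	dax_read_unlock(id);
 */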
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	/*
	 * The userspace address for the memory copy has already been validated
	 * via access_ok() in vfs_write(), so use the 'no check' version to
	 * bypass the HARDENED_USERCOPY overhead.
	 */
	if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags))
		return _copy_from_iter_flushcache(addr, bytes, i);
	return _copy_from_iter(addr, bytes, i);
}

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	/*
	 * The userspace address for the memory copy has already been validated
	 * via access_ok() in vfs_read(), so use the 'no check' version to
	 * bypass the HARDENED_USERCOPY overhead.
	 */
	if (test_bit(DAXDEV_NOMC, &dax_dev->flags))
		return _copy_mc_to_iter(addr, bytes, i);
	return _copy_to_iter(addr, bytes, i);
}

int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
			size_t nr_pages)
{
	if (!dax_alive(dax_dev))
		return -ENXIO;
	/*
	 * There are no callers that want to zero more than one page as of now.
	 * Once users are there, this check can be removed after the
	 * device mapper code has been updated to split ranges across targets.
	 */
	if (nr_pages != 1)
		return -EIO;

	return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *iter)
{
	if (!dax_dev->ops->recovery_write)
		return 0;
	return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter);
}
EXPORT_SYMBOL_GPL(dax_recovery_write);
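/*
 * Illustrative sketch (not part of the original file): a provider wires up
 * struct dax_operations before calling alloc_dax(). The field names below
 * match the ops invoked in this file; ->zero_page_range is mandatory (see
 * the WARN_ON_ONCE() in alloc_dax() further down), while ->recovery_write
 * is optional. The example_dax_*() callbacks are hypothetical:
 *
 *	static const struct dax_operations example_dax_ops = {
 *		.direct_access = example_dax_direct_access,
 *		.zero_page_range = example_dax_zero_page_range,
 *		.recovery_write = example_dax_recovery_write,
 *	};
 */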
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool dax_synchronous(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_synchronous);

void set_dax_synchronous(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(set_dax_synchronous);

void set_dax_nocache(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_NOCACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(set_dax_nocache);

void set_dax_nomc(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_NOMC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(set_dax_nomc);

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed. Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
	synchronize_srcu(&dax_srcu);
}
EXPORT_SYMBOL_GPL(kill_dax);

void run_dax(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = alloc_inode_sb(sb, dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_free_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, iminor(inode));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.free_inode = dax_free_inode,
	.drop_inode = generic_delete_inode,
};

static int dax_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	ctx->ops = &dax_sops;
	return 0;
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.init_fs_context = dax_init_fs_context,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);

	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
{
	struct dax_device *dax_dev;
	dev_t devt;
	int minor;

	if (WARN_ON_ONCE(ops && !ops->zero_page_range))
		return ERR_PTR(-EINVAL);

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		return ERR_PTR(-ENOMEM);

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_dev->ops = ops;
	dax_dev->private = private;
	return dax_dev;

err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);
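/*
 * Illustrative sketch (not part of the original file): the provider
 * lifecycle as pieced together from the helpers above. Per the WARN_ONCE()
 * in dax_destroy_inode(), kill_dax() must precede the final put_dax().
 * Names prefixed "example_" are hypothetical:
 *
 *	dax_dev = alloc_dax(example_drv, &example_dax_ops);
 *	if (IS_ERR(dax_dev))
 *		return PTR_ERR(dax_dev);
 *	set_dax_nocache(dax_dev);
 *	set_dax_nomc(dax_dev);
 *
 *	// ... device operates; then on teardown:
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 */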
/**
 * inode_dax() - convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
		return NULL;
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

err_mount:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	rcu_barrier();
	kmem_cache_destroy(dax_cache);
}

static int __init dax_core_init(void)
{
	int rc;

	rc = dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		goto err_chrdev;

	rc = dax_bus_init();
	if (rc)
		goto err_bus;
	return 0;

err_bus:
	unregister_chrdev_region(dax_devt, MINORMASK+1);
err_chrdev:
	dax_fs_exit();
	return rc;
}

static void __exit dax_core_exit(void)
{
	dax_bus_exit();
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);