xref: /openbmc/linux/drivers/dax/super.c (revision 1ea7ca1b)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"

/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: operations for this device
 * @holder_data: holder of a dax_device: could be filesystem or mapped device
 * @holder_ops: operations for the inner holder
 */
struct dax_device {
	struct inode inode;
	struct cdev cdev;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
	void *holder_data;
	const struct dax_holder_operations *holder_ops;
};

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
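
/*
 * A typical caller brackets dax_device accesses with this SRCU pair so
 * that kill_dax() can wait out in-flight operations with
 * synchronize_srcu().  Hedged sketch (dax_dev is a caller-provided
 * pointer, not a name defined here):
 *
 *	int id = dax_read_lock();
 *
 *	if (dax_alive(dax_dev))
 *		... operate on dax_dev ...
 *	dax_read_unlock(id);
 */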

#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
#include <linux/blkdev.h>

static DEFINE_XARRAY(dax_hosts);

int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
	return xa_insert(&dax_hosts, (unsigned long)disk, dax_dev, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(dax_add_host);

void dax_remove_host(struct gendisk *disk)
{
	xa_erase(&dax_hosts, (unsigned long)disk);
}
EXPORT_SYMBOL_GPL(dax_remove_host);

/**
 * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
 * @bdev: block device to find a dax_device for
 * @start_off: returns the byte offset into the dax_device that @bdev starts
 * @holder: filesystem or mapped device inside the dax_device
 * @ops: operations for the inner holder
 */
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
		void *holder, const struct dax_holder_operations *ops)
{
	struct dax_device *dax_dev;
	u64 part_size;
	int id;

	if (!blk_queue_dax(bdev->bd_disk->queue))
		return NULL;

	*start_off = get_start_sect(bdev) * SECTOR_SIZE;
	part_size = bdev_nr_sectors(bdev) * SECTOR_SIZE;
	if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) {
		pr_info("%pg: error: unaligned partition for dax\n", bdev);
		return NULL;
	}

	id = dax_read_lock();
	dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk);
	if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode))
		dax_dev = NULL;
	else if (holder) {
		if (!cmpxchg(&dax_dev->holder_data, NULL, holder))
			dax_dev->holder_ops = ops;
		else
			dax_dev = NULL;
	}
	dax_read_unlock(id);

	return dax_dev;
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);

void fs_put_dax(struct dax_device *dax_dev, void *holder)
{
	if (dax_dev && holder &&
	    cmpxchg(&dax_dev->holder_data, holder, NULL) == holder)
		dax_dev->holder_ops = NULL;
	put_dax(dax_dev);
}
EXPORT_SYMBOL_GPL(fs_put_dax);
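
/*
 * Lookup and release pair up: a filesystem that found its dax_device
 * (and registered itself as holder) via fs_dax_get_by_bdev() undoes
 * both with fs_put_dax(), passing the same holder pointer.  Hedged
 * sketch with illustrative names (sb, my_holder_ops):
 *
 *	u64 start_off;
 *	struct dax_device *dax_dev =
 *		fs_dax_get_by_bdev(bdev, &start_off, sb, &my_holder_ops);
 *
 *	if (dax_dev) {
 *		... use dax_dev, offsetting accesses by start_off ...
 *		fs_put_dax(dax_dev, sb);
 *	}
 */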
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
	/* flag to check if device supports synchronous flush */
	DAXDEV_SYNC,
	/* do not leave the caches dirty after writes */
	DAXDEV_NOCACHE,
	/* handle CPU fetch exceptions during reads */
	DAXDEV_NOMC,
};

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @mode: indicator of normal access or recovery write
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device-relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return -EINVAL;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			mode, kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
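
/*
 * Callers usually probe in a loop, because fewer than @nr_pages may be
 * returned.  Hedged sketch (pgoff and nr_pages come from the caller):
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long mapped;
 *
 *	mapped = dax_direct_access(dax_dev, pgoff, nr_pages,
 *			DAX_ACCESS, &kaddr, &pfn);
 *	if (mapped < 0)
 *		return mapped;
 *	... kaddr now addresses mapped * PAGE_SIZE bytes ...
 */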
170 
dax_copy_from_iter(struct dax_device * dax_dev,pgoff_t pgoff,void * addr,size_t bytes,struct iov_iter * i)171 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
172 		size_t bytes, struct iov_iter *i)
173 {
174 	if (!dax_alive(dax_dev))
175 		return 0;
176 
177 	/*
178 	 * The userspace address for the memory copy has already been validated
179 	 * via access_ok() in vfs_write, so use the 'no check' version to bypass
180 	 * the HARDENED_USERCOPY overhead.
181 	 */
182 	if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags))
183 		return _copy_from_iter_flushcache(addr, bytes, i);
184 	return _copy_from_iter(addr, bytes, i);
185 }
186 
dax_copy_to_iter(struct dax_device * dax_dev,pgoff_t pgoff,void * addr,size_t bytes,struct iov_iter * i)187 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
188 		size_t bytes, struct iov_iter *i)
189 {
190 	if (!dax_alive(dax_dev))
191 		return 0;
192 
193 	/*
194 	 * The userspace address for the memory copy has already been validated
195 	 * via access_ok() in vfs_red, so use the 'no check' version to bypass
196 	 * the HARDENED_USERCOPY overhead.
197 	 */
198 	if (test_bit(DAXDEV_NOMC, &dax_dev->flags))
199 		return _copy_mc_to_iter(addr, bytes, i);
200 	return _copy_to_iter(addr, bytes, i);
201 }
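
/*
 * Which copy variant runs above is per-device policy: a provider that
 * wants writes flushed out of the CPU cache sets DAXDEV_NOCACHE via
 * set_dax_nocache(), and one whose reads may trip CPU fetch exceptions
 * sets DAXDEV_NOMC via set_dax_nomc() to use the exception-handling
 * copy (both setters are defined later in this file).
 */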

int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
			size_t nr_pages)
{
	int ret;

	if (!dax_alive(dax_dev))
		return -ENXIO;
	/*
	 * There are no callers that want to zero more than one page as of now.
	 * Once users are there, this check can be removed after the
	 * device mapper code has been updated to split ranges across targets.
	 */
	if (nr_pages != 1)
		return -EIO;

	ret = dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
	return dax_mem2blk_err(ret);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
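
/*
 * @pgoff and @nr_pages are in page units relative to the start of the
 * dax_device.  Given the single-page restriction above, zeroing the
 * page at byte offset 'off' (an illustrative variable) reduces to:
 *
 *	rc = dax_zero_page_range(dax_dev, off >> PAGE_SHIFT, 1);
 */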

size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *iter)
{
	if (!dax_dev->ops->recovery_write)
		return 0;
	return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter);
}
EXPORT_SYMBOL_GPL(dax_recovery_write);

int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off,
			      u64 len, int mf_flags)
{
	int rc, id;

	id = dax_read_lock();
	if (!dax_alive(dax_dev)) {
		rc = -ENXIO;
		goto out;
	}

	if (!dax_dev->holder_ops) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	rc = dax_dev->holder_ops->notify_failure(dax_dev, off, len, mf_flags);
out:
	dax_read_unlock(id);
	return rc;
}
EXPORT_SYMBOL_GPL(dax_holder_notify_failure);
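
/*
 * The notify_failure() callback invoked here is the one the holder
 * registered through fs_dax_get_by_bdev().  This file itself uses it
 * from kill_dax(), passing off=0 and len=U64_MAX to fail the whole
 * range before teardown.
 */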

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);
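
/*
 * dax_flush() is deliberately a nop unless the provider has enabled
 * DAXDEV_WRITE_CACHE with dax_write_cache(dax_dev, true) below, i.e.
 * unless writes land in a volatile CPU cache that must be written back
 * before they are durable.
 */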

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool dax_synchronous(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_synchronous);

void set_dax_synchronous(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(set_dax_synchronous);

void set_dax_nocache(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_NOCACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(set_dax_nocache);

void set_dax_nomc(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_NOMC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(set_dax_nomc);

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

/*
 * Note: rcu is not protecting the liveness of dax_dev; rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive() have completed.  Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	if (dax_dev->holder_data != NULL)
		dax_holder_notify_failure(dax_dev, 0, U64_MAX, 0);

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
	synchronize_srcu(&dax_srcu);

	/* clear holder data */
	dax_dev->holder_ops = NULL;
	dax_dev->holder_data = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);
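
/*
 * Providers tear down in two steps: kill_dax() to fence off new
 * operations and drain SRCU readers, then put_dax() to drop the final
 * inode reference, e.g.:
 *
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 *
 * dax_destroy_inode() below warns when the final iput() arrives while
 * the device is still alive.
 */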

void run_dax(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = alloc_inode_sb(sb, dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_free_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);
	if (inode->i_rdev)
		ida_free(&dax_minor_ida, iminor(inode));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);
	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.free_inode = dax_free_inode,
	.drop_inode = generic_delete_inode,
};

static int dax_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &dax_sops;
	return 0;
}

static struct file_system_type dax_fs_type = {
	.name		= "dax",
	.init_fs_context = dax_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);

	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
{
	struct dax_device *dax_dev;
	dev_t devt;
	int minor;

	if (WARN_ON_ONCE(ops && !ops->zero_page_range))
		return ERR_PTR(-EINVAL);

	minor = ida_alloc_max(&dax_minor_ida, MINORMASK, GFP_KERNEL);
	if (minor < 0)
		return ERR_PTR(-ENOMEM);

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_dev->ops = ops;
	dax_dev->private = private;
	return dax_dev;

 err_dev:
	ida_free(&dax_minor_ida, minor);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
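
/*
 * Construction mirrors the teardown sketched above kill_dax().  Hedged
 * provider sketch (my_data and my_ops are illustrative names; if @ops
 * is non-NULL it must supply zero_page_range(), per the WARN_ON_ONCE
 * above):
 *
 *	struct dax_device *dax_dev = alloc_dax(my_data, &my_ops);
 *
 *	if (IS_ERR(dax_dev))
 *		return PTR_ERR(dax_dev);
 *	set_dax_nocache(dax_dev);	... optional policy flags ...
 *	...
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 */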

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_holder() - obtain the holder of a dax device
 * @dax_dev: a dax_device instance
 *
 * Return: the holder's data which represents the holder if registered,
 * otherwise NULL.
 */
void *dax_holder(struct dax_device *dax_dev)
{
	return dax_dev->holder_data;
}
EXPORT_SYMBOL_GPL(dax_holder);

/**
 * inode_dax() - convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
		return NULL;
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}
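
/*
 * As a kmem_cache constructor, init_once() runs once per object when
 * its slab memory is created, not on every dax_alloc_inode(); per-use
 * inode state is then (re)initialized by the VFS when iget5_locked()
 * instantiates the inode.
 */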

static int dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	rcu_barrier();
	kmem_cache_destroy(dax_cache);
}

static int __init dax_core_init(void)
{
	int rc;

	rc = dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		goto err_chrdev;

	rc = dax_bus_init();
	if (rc)
		goto err_bus;
	return 0;

err_bus:
	unregister_chrdev_region(dax_devt, MINORMASK+1);
err_chrdev:
	dax_fs_exit();
	return rc;
}

static void __exit dax_core_exit(void)
{
	dax_bus_exit();
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);