xref: /openbmc/linux/fs/dax.c (revision 2596e07a)
1 /*
2  * fs/dax.c - Direct Access filesystem code
3  * Copyright (c) 2013-2014 Intel Corporation
4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  */
16 
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/pmem.h>
29 #include <linux/sched.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34 
35 static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
36 {
37 	struct request_queue *q = bdev->bd_queue;
38 	long rc = -EIO;
39 
40 	dax->addr = (void __pmem *) ERR_PTR(-EIO);
41 	if (blk_queue_enter(q, true) != 0)
42 		return rc;
43 
44 	rc = bdev_direct_access(bdev, dax);
45 	if (rc < 0) {
46 		dax->addr = (void __pmem *) ERR_PTR(rc);
47 		blk_queue_exit(q);
48 		return rc;
49 	}
50 	return rc;
51 }
52 
53 static void dax_unmap_atomic(struct block_device *bdev,
54 		const struct blk_dax_ctl *dax)
55 {
56 	if (IS_ERR(dax->addr))
57 		return;
58 	blk_queue_exit(bdev->bd_queue);
59 }
60 
61 struct page *read_dax_sector(struct block_device *bdev, sector_t n)
62 {
63 	struct page *page = alloc_pages(GFP_KERNEL, 0);
64 	struct blk_dax_ctl dax = {
65 		.size = PAGE_SIZE,
66 		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
67 	};
68 	long rc;
69 
70 	if (!page)
71 		return ERR_PTR(-ENOMEM);
72 
73 	rc = dax_map_atomic(bdev, &dax);
74 	if (rc < 0)
75 		return ERR_PTR(rc);
76 	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
77 	dax_unmap_atomic(bdev, &dax);
78 	return page;
79 }
80 
81 /*
82  * dax_clear_sectors() is called from within transaction context from XFS,
83  * and hence this means the stack from this point must follow GFP_NOFS
84  * semantics for all operations.
85  */
86 int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
87 {
88 	struct blk_dax_ctl dax = {
89 		.sector = _sector,
90 		.size = _size,
91 	};
92 
93 	might_sleep();
94 	do {
95 		long count, sz;
96 
97 		count = dax_map_atomic(bdev, &dax);
98 		if (count < 0)
99 			return count;
100 		sz = min_t(long, count, SZ_128K);
101 		clear_pmem(dax.addr, sz);
102 		dax.size -= sz;
103 		dax.sector += sz / 512;
104 		dax_unmap_atomic(bdev, &dax);
105 		cond_resched();
106 	} while (dax.size);
107 
108 	wmb_pmem();
109 	return 0;
110 }
111 EXPORT_SYMBOL_GPL(dax_clear_sectors);
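/*
 * Usage sketch (hypothetical "examplefs", not taken from a real filesystem):
 * a filesystem can zero a newly allocated extent with dax_clear_sectors()
 * before exposing it to readers, e.g. from within a transaction as XFS does.
 *
 *	static int examplefs_zero_extent(struct inode *inode,
 *			sector_t start_block, long nr_blocks)
 *	{
 *		struct block_device *bdev = inode->i_sb->s_bdev;
 *		unsigned int blkbits = inode->i_blkbits;
 *
 *		return dax_clear_sectors(bdev, start_block << (blkbits - 9),
 *				nr_blocks << blkbits);
 *	}
 */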
112 
113 /* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
114 static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
115 		loff_t pos, loff_t end)
116 {
117 	loff_t final = end - pos + first; /* The final byte of the buffer */
118 
119 	if (first > 0)
120 		clear_pmem(addr, first);
121 	if (final < size)
122 		clear_pmem(addr + final, size - final);
123 }
124 
125 static bool buffer_written(struct buffer_head *bh)
126 {
127 	return buffer_mapped(bh) && !buffer_unwritten(bh);
128 }
129 
130 /*
131  * When ext4 encounters a hole, it returns without modifying the buffer_head
132  * which means that we can't trust b_size.  To cope with this, we set b_state
133  * to 0 before calling get_block and, if any bit is set, we know we can trust
134  * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
135  * and would save us time calling get_block repeatedly.
136  */
137 static bool buffer_size_valid(struct buffer_head *bh)
138 {
139 	return bh->b_state != 0;
140 }
141 
142 
143 static sector_t to_sector(const struct buffer_head *bh,
144 		const struct inode *inode)
145 {
146 	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
147 
148 	return sector;
149 }
150 
151 static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
152 		      loff_t start, loff_t end, get_block_t get_block,
153 		      struct buffer_head *bh)
154 {
155 	loff_t pos = start, max = start, bh_max = start;
156 	bool hole = false, need_wmb = false;
157 	struct block_device *bdev = NULL;
158 	int rw = iov_iter_rw(iter), rc;
159 	long map_len = 0;
160 	struct blk_dax_ctl dax = {
161 		.addr = (void __pmem *) ERR_PTR(-EIO),
162 	};
163 
164 	if (rw == READ)
165 		end = min(end, i_size_read(inode));
166 
167 	while (pos < end) {
168 		size_t len;
169 		if (pos == max) {
170 			unsigned blkbits = inode->i_blkbits;
171 			long page = pos >> PAGE_SHIFT;
172 			sector_t block = page << (PAGE_SHIFT - blkbits);
173 			unsigned first = pos - (block << blkbits);
174 			long size;
175 
176 			if (pos == bh_max) {
177 				bh->b_size = PAGE_ALIGN(end - pos);
178 				bh->b_state = 0;
179 				rc = get_block(inode, block, bh, rw == WRITE);
180 				if (rc)
181 					break;
182 				if (!buffer_size_valid(bh))
183 					bh->b_size = 1 << blkbits;
184 				bh_max = pos - first + bh->b_size;
185 				bdev = bh->b_bdev;
186 			} else {
187 				unsigned done = bh->b_size -
188 						(bh_max - (pos - first));
189 				bh->b_blocknr += done >> blkbits;
190 				bh->b_size -= done;
191 			}
192 
193 			hole = rw == READ && !buffer_written(bh);
194 			if (hole) {
195 				size = bh->b_size - first;
196 			} else {
197 				dax_unmap_atomic(bdev, &dax);
198 				dax.sector = to_sector(bh, inode);
199 				dax.size = bh->b_size;
200 				map_len = dax_map_atomic(bdev, &dax);
201 				if (map_len < 0) {
202 					rc = map_len;
203 					break;
204 				}
205 				if (buffer_unwritten(bh) || buffer_new(bh)) {
206 					dax_new_buf(dax.addr, map_len, first,
207 							pos, end);
208 					need_wmb = true;
209 				}
210 				dax.addr += first;
211 				size = map_len - first;
212 			}
213 			max = min(pos + size, end);
214 		}
215 
216 		if (iov_iter_rw(iter) == WRITE) {
217 			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
218 			need_wmb = true;
219 		} else if (!hole)
220 			len = copy_to_iter((void __force *) dax.addr, max - pos,
221 					iter);
222 		else
223 			len = iov_iter_zero(max - pos, iter);
224 
225 		if (!len) {
226 			rc = -EFAULT;
227 			break;
228 		}
229 
230 		pos += len;
231 		if (!IS_ERR(dax.addr))
232 			dax.addr += len;
233 	}
234 
235 	if (need_wmb)
236 		wmb_pmem();
237 	dax_unmap_atomic(bdev, &dax);
238 
239 	return (pos == start) ? rc : pos - start;
240 }
241 
242 /**
243  * dax_do_io - Perform I/O to a DAX file
244  * @iocb: The control block for this I/O
245  * @inode: The file which the I/O is directed at
246  * @iter: The addresses to do I/O from or to
247  * @pos: The file offset where the I/O starts
248  * @get_block: The filesystem method used to translate file offsets to blocks
249  * @end_io: A filesystem callback for I/O completion
250  * @flags: See below
251  *
252  * This function uses the same locking scheme as do_blockdev_direct_IO:
253  * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
254  * caller for writes.  For reads, we take and release the i_mutex ourselves.
255  * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
256  * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
257  * is in progress.
258  */
259 ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
260 		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
261 		  dio_iodone_t end_io, int flags)
262 {
263 	struct buffer_head bh;
264 	ssize_t retval = -EINVAL;
265 	loff_t end = pos + iov_iter_count(iter);
266 
267 	memset(&bh, 0, sizeof(bh));
268 	bh.b_bdev = inode->i_sb->s_bdev;
269 
270 	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
271 		struct address_space *mapping = inode->i_mapping;
272 		inode_lock(inode);
273 		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
274 		if (retval) {
275 			inode_unlock(inode);
276 			goto out;
277 		}
278 	}
279 
280 	/* Protects against truncate */
281 	if (!(flags & DIO_SKIP_DIO_COUNT))
282 		inode_dio_begin(inode);
283 
284 	retval = dax_io(inode, iter, pos, end, get_block, &bh);
285 
286 	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
287 		inode_unlock(inode);
288 
289 	if ((retval > 0) && end_io)
290 		end_io(iocb, pos, retval, bh.b_private);
291 
292 	if (!(flags & DIO_SKIP_DIO_COUNT))
293 		inode_dio_end(inode);
294  out:
295 	return retval;
296 }
297 EXPORT_SYMBOL_GPL(dax_do_io);
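/*
 * Usage sketch (hypothetical "examplefs"; examplefs_get_block() stands in for
 * the filesystem's get_block_t): wiring dax_do_io() into ->direct_IO, assuming
 * the v4.5-era (struct kiocb *, struct iov_iter *, loff_t) prototype and that
 * these address_space operations are only installed on DAX inodes.
 *
 *	static ssize_t examplefs_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		WARN_ON_ONCE(!IS_DAX(inode));
 *		return dax_do_io(iocb, inode, iter, offset,
 *				examplefs_get_block, NULL, DIO_LOCKING);
 *	}
 */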
298 
299 /*
300  * The user has performed a load from a hole in the file.  Allocating
301  * a new page in the file would cause excessive storage usage for
302  * workloads with sparse files.  We allocate a page cache page instead.
303  * We'll kick it out of the page cache if it's ever written to,
304  * otherwise it will simply fall out of the page cache under memory
305  * pressure without ever having been dirtied.
306  */
307 static int dax_load_hole(struct address_space *mapping, struct page *page,
308 							struct vm_fault *vmf)
309 {
310 	unsigned long size;
311 	struct inode *inode = mapping->host;
312 	if (!page)
313 		page = find_or_create_page(mapping, vmf->pgoff,
314 						GFP_KERNEL | __GFP_ZERO);
315 	if (!page)
316 		return VM_FAULT_OOM;
317 	/* Recheck i_size under page lock to avoid truncate race */
318 	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
319 	if (vmf->pgoff >= size) {
320 		unlock_page(page);
321 		page_cache_release(page);
322 		return VM_FAULT_SIGBUS;
323 	}
324 
325 	vmf->page = page;
326 	return VM_FAULT_LOCKED;
327 }
328 
329 static int copy_user_bh(struct page *to, struct inode *inode,
330 		struct buffer_head *bh, unsigned long vaddr)
331 {
332 	struct blk_dax_ctl dax = {
333 		.sector = to_sector(bh, inode),
334 		.size = bh->b_size,
335 	};
336 	struct block_device *bdev = bh->b_bdev;
337 	void *vto;
338 
339 	if (dax_map_atomic(bdev, &dax) < 0)
340 		return PTR_ERR(dax.addr);
341 	vto = kmap_atomic(to);
342 	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
343 	kunmap_atomic(vto);
344 	dax_unmap_atomic(bdev, &dax);
345 	return 0;
346 }
347 
348 #define NO_SECTOR -1
349 #define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT))
350 
351 static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
352 		sector_t sector, bool pmd_entry, bool dirty)
353 {
354 	struct radix_tree_root *page_tree = &mapping->page_tree;
355 	pgoff_t pmd_index = DAX_PMD_INDEX(index);
356 	int type, error = 0;
357 	void *entry;
358 
359 	WARN_ON_ONCE(pmd_entry && !dirty);
360 	if (dirty)
361 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
362 
363 	spin_lock_irq(&mapping->tree_lock);
364 
365 	entry = radix_tree_lookup(page_tree, pmd_index);
366 	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
367 		index = pmd_index;
368 		goto dirty;
369 	}
370 
371 	entry = radix_tree_lookup(page_tree, index);
372 	if (entry) {
373 		type = RADIX_DAX_TYPE(entry);
374 		if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
375 					type != RADIX_DAX_PMD)) {
376 			error = -EIO;
377 			goto unlock;
378 		}
379 
380 		if (!pmd_entry || type == RADIX_DAX_PMD)
381 			goto dirty;
382 
383 		/*
384 		 * We only insert dirty PMD entries into the radix tree.  This
385 		 * means we don't need to worry about removing a dirty PTE
386 		 * entry and inserting a clean PMD entry, thus reducing the
387 		 * range we would flush with a follow-up fsync/msync call.
388 		 */
389 		radix_tree_delete(&mapping->page_tree, index);
390 		mapping->nrexceptional--;
391 	}
392 
393 	if (sector == NO_SECTOR) {
394 		/*
395 		 * This can happen during correct operation if our pfn_mkwrite
396 		 * fault raced against a hole punch operation.  If this
397 		 * happens the pte that was hole punched will have been
398 		 * unmapped and the radix tree entry will have been removed by
399 		 * the time we are called, but the call will still happen.  We
400 		 * will return all the way up to wp_pfn_shared(), where the
401 		 * pte_same() check will fail, eventually causing page fault
402 		 * to be retried by the CPU.
403 		 */
404 		goto unlock;
405 	}
406 
407 	error = radix_tree_insert(page_tree, index,
408 			RADIX_DAX_ENTRY(sector, pmd_entry));
409 	if (error)
410 		goto unlock;
411 
412 	mapping->nrexceptional++;
413  dirty:
414 	if (dirty)
415 		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
416  unlock:
417 	spin_unlock_irq(&mapping->tree_lock);
418 	return error;
419 }
420 
421 static int dax_writeback_one(struct block_device *bdev,
422 		struct address_space *mapping, pgoff_t index, void *entry)
423 {
424 	struct radix_tree_root *page_tree = &mapping->page_tree;
425 	int type = RADIX_DAX_TYPE(entry);
426 	struct radix_tree_node *node;
427 	struct blk_dax_ctl dax;
428 	void **slot;
429 	int ret = 0;
430 
431 	spin_lock_irq(&mapping->tree_lock);
432 	/*
433 	 * Regular page slots are stabilized by the page lock even
434 	 * without the tree itself locked.  These unlocked entries
435 	 * need verification under the tree lock.
436 	 */
437 	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
438 		goto unlock;
439 	if (*slot != entry)
440 		goto unlock;
441 
442 	/* another fsync thread may have already written back this entry */
443 	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
444 		goto unlock;
445 
446 	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
447 		ret = -EIO;
448 		goto unlock;
449 	}
450 
451 	dax.sector = RADIX_DAX_SECTOR(entry);
452 	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
453 	spin_unlock_irq(&mapping->tree_lock);
454 
455 	/*
456 	 * We cannot hold tree_lock while calling dax_map_atomic() because it
457 	 * eventually calls cond_resched().
458 	 */
459 	ret = dax_map_atomic(bdev, &dax);
460 	if (ret < 0)
461 		return ret;
462 
463 	if (WARN_ON_ONCE(ret < dax.size)) {
464 		ret = -EIO;
465 		goto unmap;
466 	}
467 
468 	wb_cache_pmem(dax.addr, dax.size);
469 
470 	spin_lock_irq(&mapping->tree_lock);
471 	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
472 	spin_unlock_irq(&mapping->tree_lock);
473  unmap:
474 	dax_unmap_atomic(bdev, &dax);
475 	return ret;
476 
477  unlock:
478 	spin_unlock_irq(&mapping->tree_lock);
479 	return ret;
480 }
481 
482 /*
483  * Flush the mapping to the persistent domain within the byte range of [start,
484  * end]. This is required by data integrity operations to ensure file data is
485  * on persistent storage prior to completion of the operation.
486  */
487 int dax_writeback_mapping_range(struct address_space *mapping,
488 		struct block_device *bdev, struct writeback_control *wbc)
489 {
490 	struct inode *inode = mapping->host;
491 	pgoff_t start_index, end_index, pmd_index;
492 	pgoff_t indices[PAGEVEC_SIZE];
493 	struct pagevec pvec;
494 	bool done = false;
495 	int i, ret = 0;
496 	void *entry;
497 
498 	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
499 		return -EIO;
500 
501 	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
502 		return 0;
503 
504 	start_index = wbc->range_start >> PAGE_CACHE_SHIFT;
505 	end_index = wbc->range_end >> PAGE_CACHE_SHIFT;
506 	pmd_index = DAX_PMD_INDEX(start_index);
507 
508 	rcu_read_lock();
509 	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
510 	rcu_read_unlock();
511 
512 	/* see if the start of our range is covered by a PMD entry */
513 	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
514 		start_index = pmd_index;
515 
516 	tag_pages_for_writeback(mapping, start_index, end_index);
517 
518 	pagevec_init(&pvec, 0);
519 	while (!done) {
520 		pvec.nr = find_get_entries_tag(mapping, start_index,
521 				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
522 				pvec.pages, indices);
523 
524 		if (pvec.nr == 0)
525 			break;
526 
527 		for (i = 0; i < pvec.nr; i++) {
528 			if (indices[i] > end_index) {
529 				done = true;
530 				break;
531 			}
532 
533 			ret = dax_writeback_one(bdev, mapping, indices[i],
534 					pvec.pages[i]);
535 			if (ret < 0)
536 				return ret;
537 		}
538 	}
539 	wmb_pmem();
540 	return 0;
541 }
542 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
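/*
 * Usage sketch (hypothetical "examplefs"): routing DAX mappings through
 * dax_writeback_mapping_range() from ->writepages so that fsync()/msync()
 * flush dirty radix tree entries to the persistent domain.  dax_mapping() is
 * assumed to be available from <linux/dax.h>.
 *
 *	static int examplefs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		if (dax_mapping(mapping))
 *			return dax_writeback_mapping_range(mapping,
 *					mapping->host->i_sb->s_bdev, wbc);
 *		return generic_writepages(mapping, wbc);
 *	}
 */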
543 
544 static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
545 			struct vm_area_struct *vma, struct vm_fault *vmf)
546 {
547 	unsigned long vaddr = (unsigned long)vmf->virtual_address;
548 	struct address_space *mapping = inode->i_mapping;
549 	struct block_device *bdev = bh->b_bdev;
550 	struct blk_dax_ctl dax = {
551 		.sector = to_sector(bh, inode),
552 		.size = bh->b_size,
553 	};
554 	pgoff_t size;
555 	int error;
556 
557 	i_mmap_lock_read(mapping);
558 
559 	/*
560 	 * Check truncate didn't happen while we were allocating a block.
561 	 * If it did, this block may or may not be still allocated to the
562 	 * file.  We can't tell the filesystem to free it because we can't
563 	 * take i_mutex here.  In the worst case, the file still has blocks
564 	 * allocated past the end of the file.
565 	 */
566 	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
567 	if (unlikely(vmf->pgoff >= size)) {
568 		error = -EIO;
569 		goto out;
570 	}
571 
572 	if (dax_map_atomic(bdev, &dax) < 0) {
573 		error = PTR_ERR(dax.addr);
574 		goto out;
575 	}
576 
577 	if (buffer_unwritten(bh) || buffer_new(bh)) {
578 		clear_pmem(dax.addr, PAGE_SIZE);
579 		wmb_pmem();
580 	}
581 	dax_unmap_atomic(bdev, &dax);
582 
583 	error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
584 			vmf->flags & FAULT_FLAG_WRITE);
585 	if (error)
586 		goto out;
587 
588 	error = vm_insert_mixed(vma, vaddr, dax.pfn);
589 
590  out:
591 	i_mmap_unlock_read(mapping);
592 
593 	return error;
594 }
595 
596 /**
597  * __dax_fault - handle a page fault on a DAX file
598  * @vma: The virtual memory area where the fault occurred
599  * @vmf: The description of the fault
600  * @get_block: The filesystem method used to translate file offsets to blocks
601  * @complete_unwritten: The filesystem method used to convert unwritten blocks
602  *	to written so the data written to them is exposed. This is required
603  *	by write faults for filesystems that will return unwritten extent
604  *	mappings from @get_block, but it is optional for reads as
605  *	dax_insert_mapping() will always zero unwritten blocks. If the fs does
606  *	not support unwritten extents, it should pass NULL.
607  *
608  * When a page fault occurs, filesystems may call this helper in their
609  * fault handler for DAX files. __dax_fault() assumes the caller has done all
610  * the necessary locking for the page fault to proceed successfully.
611  */
612 int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
613 			get_block_t get_block, dax_iodone_t complete_unwritten)
614 {
615 	struct file *file = vma->vm_file;
616 	struct address_space *mapping = file->f_mapping;
617 	struct inode *inode = mapping->host;
618 	struct page *page;
619 	struct buffer_head bh;
620 	unsigned long vaddr = (unsigned long)vmf->virtual_address;
621 	unsigned blkbits = inode->i_blkbits;
622 	sector_t block;
623 	pgoff_t size;
624 	int error;
625 	int major = 0;
626 
627 	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
628 	if (vmf->pgoff >= size)
629 		return VM_FAULT_SIGBUS;
630 
631 	memset(&bh, 0, sizeof(bh));
632 	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
633 	bh.b_bdev = inode->i_sb->s_bdev;
634 	bh.b_size = PAGE_SIZE;
635 
636  repeat:
637 	page = find_get_page(mapping, vmf->pgoff);
638 	if (page) {
639 		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
640 			page_cache_release(page);
641 			return VM_FAULT_RETRY;
642 		}
643 		if (unlikely(page->mapping != mapping)) {
644 			unlock_page(page);
645 			page_cache_release(page);
646 			goto repeat;
647 		}
648 		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
649 		if (unlikely(vmf->pgoff >= size)) {
650 			/*
651 			 * We have a struct page covering a hole in the file
652 			 * from a read fault and we've raced with a truncate
653 			 */
654 			error = -EIO;
655 			goto unlock_page;
656 		}
657 	}
658 
659 	error = get_block(inode, block, &bh, 0);
660 	if (!error && (bh.b_size < PAGE_SIZE))
661 		error = -EIO;		/* fs corruption? */
662 	if (error)
663 		goto unlock_page;
664 
665 	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
666 		if (vmf->flags & FAULT_FLAG_WRITE) {
667 			error = get_block(inode, block, &bh, 1);
668 			count_vm_event(PGMAJFAULT);
669 			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
670 			major = VM_FAULT_MAJOR;
671 			if (!error && (bh.b_size < PAGE_SIZE))
672 				error = -EIO;
673 			if (error)
674 				goto unlock_page;
675 		} else {
676 			return dax_load_hole(mapping, page, vmf);
677 		}
678 	}
679 
680 	if (vmf->cow_page) {
681 		struct page *new_page = vmf->cow_page;
682 		if (buffer_written(&bh))
683 			error = copy_user_bh(new_page, inode, &bh, vaddr);
684 		else
685 			clear_user_highpage(new_page, vaddr);
686 		if (error)
687 			goto unlock_page;
688 		vmf->page = page;
689 		if (!page) {
690 			i_mmap_lock_read(mapping);
691 			/* Check we didn't race with truncate */
692 			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
693 								PAGE_SHIFT;
694 			if (vmf->pgoff >= size) {
695 				i_mmap_unlock_read(mapping);
696 				error = -EIO;
697 				goto out;
698 			}
699 		}
700 		return VM_FAULT_LOCKED;
701 	}
702 
703 	/* Check we didn't race with a read fault installing a new page */
704 	if (!page && major)
705 		page = find_lock_page(mapping, vmf->pgoff);
706 
707 	if (page) {
708 		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
709 							PAGE_CACHE_SIZE, 0);
710 		delete_from_page_cache(page);
711 		unlock_page(page);
712 		page_cache_release(page);
713 		page = NULL;
714 	}
715 
716 	/*
717 	 * If we successfully insert the new mapping over an unwritten extent,
718 	 * we need to ensure we convert the unwritten extent. If there is an
719 	 * error inserting the mapping, the filesystem needs to leave it as
720 	 * unwritten to prevent exposure of the stale underlying data to
721 	 * userspace, but we still need to call the completion function so
722 	 * the private resources on the mapping buffer can be released. We
723 	 * indicate what the callback should do via the uptodate variable, same
724 	 * as for normal BH based IO completions.
725 	 */
726 	error = dax_insert_mapping(inode, &bh, vma, vmf);
727 	if (buffer_unwritten(&bh)) {
728 		if (complete_unwritten)
729 			complete_unwritten(&bh, !error);
730 		else
731 			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
732 	}
733 
734  out:
735 	if (error == -ENOMEM)
736 		return VM_FAULT_OOM | major;
737 	/* -EBUSY is fine, somebody else faulted on the same PTE */
738 	if ((error < 0) && (error != -EBUSY))
739 		return VM_FAULT_SIGBUS | major;
740 	return VM_FAULT_NOPAGE | major;
741 
742  unlock_page:
743 	if (page) {
744 		unlock_page(page);
745 		page_cache_release(page);
746 	}
747 	goto out;
748 }
749 EXPORT_SYMBOL(__dax_fault);
750 
751 /**
752  * dax_fault - handle a page fault on a DAX file
753  * @vma: The virtual memory area where the fault occurred
754  * @vmf: The description of the fault
755  * @get_block: The filesystem method used to translate file offsets to blocks
756  *
757  * When a page fault occurs, filesystems may call this helper in their
758  * fault handler for DAX files.
759  */
760 int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
761 	      get_block_t get_block, dax_iodone_t complete_unwritten)
762 {
763 	int result;
764 	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
765 
766 	if (vmf->flags & FAULT_FLAG_WRITE) {
767 		sb_start_pagefault(sb);
768 		file_update_time(vma->vm_file);
769 	}
770 	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
771 	if (vmf->flags & FAULT_FLAG_WRITE)
772 		sb_end_pagefault(sb);
773 
774 	return result;
775 }
776 EXPORT_SYMBOL_GPL(dax_fault);
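/*
 * Usage sketch (hypothetical "examplefs"): a .fault handler that forwards to
 * dax_fault().  Passing NULL for @complete_unwritten assumes the filesystem
 * never returns unwritten extents; a filesystem that must do its own fault
 * locking would take that lock and call __dax_fault() directly instead.
 *
 *	static int examplefs_dax_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, examplefs_get_block, NULL);
 *	}
 */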
777 
778 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
779 /*
780  * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
781  * more often than one might expect in the below function.
782  */
783 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
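/*
 * For example, with 4K pages and 2M PMDs, PMD_SIZE >> PAGE_SHIFT is 512 and
 * PG_PMD_COLOUR is 0x1ff: a page offset is PMD-aligned iff
 * (pgoff & PG_PMD_COLOUR) == 0, and (pgoff | PG_PMD_COLOUR) is the index of
 * the last page covered by the same PMD, which is what the file size checks
 * below rely on.
 */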
784 
785 static void __dax_dbg(struct buffer_head *bh, unsigned long address,
786 		const char *reason, const char *fn)
787 {
788 	if (bh) {
789 		char bname[BDEVNAME_SIZE];
790 		bdevname(bh->b_bdev, bname);
791 		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
792 			"length %zd fallback: %s\n", fn, current->comm,
793 			address, bname, bh->b_state, (u64)bh->b_blocknr,
794 			bh->b_size, reason);
795 	} else {
796 		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
797 			current->comm, address, reason);
798 	}
799 }
800 
801 #define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
802 
803 int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
804 		pmd_t *pmd, unsigned int flags, get_block_t get_block,
805 		dax_iodone_t complete_unwritten)
806 {
807 	struct file *file = vma->vm_file;
808 	struct address_space *mapping = file->f_mapping;
809 	struct inode *inode = mapping->host;
810 	struct buffer_head bh;
811 	unsigned blkbits = inode->i_blkbits;
812 	unsigned long pmd_addr = address & PMD_MASK;
813 	bool write = flags & FAULT_FLAG_WRITE;
814 	struct block_device *bdev;
815 	pgoff_t size, pgoff;
816 	sector_t block;
817 	int error, result = 0;
818 	bool alloc = false;
819 
820 	/* dax pmd mappings require pfn_t_devmap() */
821 	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
822 		return VM_FAULT_FALLBACK;
823 
824 	/* Fall back to PTEs if we're going to COW */
825 	if (write && !(vma->vm_flags & VM_SHARED)) {
826 		split_huge_pmd(vma, pmd, address);
827 		dax_pmd_dbg(NULL, address, "cow write");
828 		return VM_FAULT_FALLBACK;
829 	}
830 	/* If the PMD would extend outside the VMA */
831 	if (pmd_addr < vma->vm_start) {
832 		dax_pmd_dbg(NULL, address, "vma start unaligned");
833 		return VM_FAULT_FALLBACK;
834 	}
835 	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
836 		dax_pmd_dbg(NULL, address, "vma end unaligned");
837 		return VM_FAULT_FALLBACK;
838 	}
839 
840 	pgoff = linear_page_index(vma, pmd_addr);
841 	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
842 	if (pgoff >= size)
843 		return VM_FAULT_SIGBUS;
844 	/* If the PMD would cover blocks out of the file */
845 	if ((pgoff | PG_PMD_COLOUR) >= size) {
846 		dax_pmd_dbg(NULL, address,
847 				"offset + huge page size > file size");
848 		return VM_FAULT_FALLBACK;
849 	}
850 
851 	memset(&bh, 0, sizeof(bh));
852 	bh.b_bdev = inode->i_sb->s_bdev;
853 	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
854 
855 	bh.b_size = PMD_SIZE;
856 
857 	if (get_block(inode, block, &bh, 0) != 0)
858 		return VM_FAULT_SIGBUS;
859 
860 	if (!buffer_mapped(&bh) && write) {
861 		if (get_block(inode, block, &bh, 1) != 0)
862 			return VM_FAULT_SIGBUS;
863 		alloc = true;
864 	}
865 
866 	bdev = bh.b_bdev;
867 
868 	/*
869 	 * If the filesystem isn't willing to tell us the length of a hole,
870 	 * just fall back to PTEs.  Calling get_block 512 times in a loop
871 	 * would be silly.
872 	 */
873 	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
874 		dax_pmd_dbg(&bh, address, "allocated block too small");
875 		return VM_FAULT_FALLBACK;
876 	}
877 
878 	/*
879 	 * If we allocated new storage, make sure no process has any
880 	 * zero pages covering this hole
881 	 */
882 	if (alloc) {
883 		loff_t lstart = pgoff << PAGE_SHIFT;
884 		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */
885 
886 		truncate_pagecache_range(inode, lstart, lend);
887 	}
888 
889 	i_mmap_lock_read(mapping);
890 
891 	/*
892 	 * If a truncate happened while we were allocating blocks, we may
893 	 * leave blocks allocated to the file that are beyond EOF.  We can't
894 	 * take i_mutex here, so just leave them hanging; they'll be freed
895 	 * when the file is deleted.
896 	 */
897 	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
898 	if (pgoff >= size) {
899 		result = VM_FAULT_SIGBUS;
900 		goto out;
901 	}
902 	if ((pgoff | PG_PMD_COLOUR) >= size) {
903 		dax_pmd_dbg(&bh, address,
904 				"offset + huge page size > file size");
905 		goto fallback;
906 	}
907 
908 	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
909 		spinlock_t *ptl;
910 		pmd_t entry;
911 		struct page *zero_page = get_huge_zero_page();
912 
913 		if (unlikely(!zero_page)) {
914 			dax_pmd_dbg(&bh, address, "no zero page");
915 			goto fallback;
916 		}
917 
918 		ptl = pmd_lock(vma->vm_mm, pmd);
919 		if (!pmd_none(*pmd)) {
920 			spin_unlock(ptl);
921 			dax_pmd_dbg(&bh, address, "pmd already present");
922 			goto fallback;
923 		}
924 
925 		dev_dbg(part_to_dev(bdev->bd_part),
926 				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
927 				__func__, current->comm, address,
928 				(unsigned long long) to_sector(&bh, inode));
929 
930 		entry = mk_pmd(zero_page, vma->vm_page_prot);
931 		entry = pmd_mkhuge(entry);
932 		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
933 		result = VM_FAULT_NOPAGE;
934 		spin_unlock(ptl);
935 	} else {
936 		struct blk_dax_ctl dax = {
937 			.sector = to_sector(&bh, inode),
938 			.size = PMD_SIZE,
939 		};
940 		long length = dax_map_atomic(bdev, &dax);
941 
942 		if (length < 0) {
943 			result = VM_FAULT_SIGBUS;
944 			goto out;
945 		}
946 		if (length < PMD_SIZE) {
947 			dax_pmd_dbg(&bh, address, "dax-length too small");
948 			dax_unmap_atomic(bdev, &dax);
949 			goto fallback;
950 		}
951 		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
952 			dax_pmd_dbg(&bh, address, "pfn unaligned");
953 			dax_unmap_atomic(bdev, &dax);
954 			goto fallback;
955 		}
956 
957 		if (!pfn_t_devmap(dax.pfn)) {
958 			dax_unmap_atomic(bdev, &dax);
959 			dax_pmd_dbg(&bh, address, "pfn not in memmap");
960 			goto fallback;
961 		}
962 
963 		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
964 			clear_pmem(dax.addr, PMD_SIZE);
965 			wmb_pmem();
966 			count_vm_event(PGMAJFAULT);
967 			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
968 			result |= VM_FAULT_MAJOR;
969 		}
970 		dax_unmap_atomic(bdev, &dax);
971 
972 		/*
973 		 * For PTE faults we insert a radix tree entry for reads, and
974 		 * leave it clean.  Then on the first write we dirty the radix
975 		 * tree entry via the dax_pfn_mkwrite() path.  This sequence
976 		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
977 		 * call into get_block() to translate the pgoff to a sector in
978 		 * order to be able to create a new radix tree entry.
979 		 *
980 		 * The PMD path doesn't have an equivalent to
981 		 * dax_pfn_mkwrite(), though, so for a read followed by a
982 		 * write we traverse all the way through __dax_pmd_fault()
983 		 * twice.  This means we can just skip inserting a radix tree
984 		 * entry completely on the initial read and just wait until
985 		 * the write to insert a dirty entry.
986 		 */
987 		if (write) {
988 			error = dax_radix_entry(mapping, pgoff, dax.sector,
989 					true, true);
990 			if (error) {
991 				dax_pmd_dbg(&bh, address,
992 						"PMD radix insertion failed");
993 				goto fallback;
994 			}
995 		}
996 
997 		dev_dbg(part_to_dev(bdev->bd_part),
998 				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
999 				__func__, current->comm, address,
1000 				pfn_t_to_pfn(dax.pfn),
1001 				(unsigned long long) dax.sector);
1002 		result |= vmf_insert_pfn_pmd(vma, address, pmd,
1003 				dax.pfn, write);
1004 	}
1005 
1006  out:
1007 	i_mmap_unlock_read(mapping);
1008 
1009 	if (buffer_unwritten(&bh))
1010 		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
1011 
1012 	return result;
1013 
1014  fallback:
1015 	count_vm_event(THP_FAULT_FALLBACK);
1016 	result = VM_FAULT_FALLBACK;
1017 	goto out;
1018 }
1019 EXPORT_SYMBOL_GPL(__dax_pmd_fault);
1020 
1021 /**
1022  * dax_pmd_fault - handle a PMD fault on a DAX file
1023  * @vma: The virtual memory area where the fault occurred
1024  * @address: The virtual address where the fault occurred
1025  * @get_block: The filesystem method used to translate file offsets to blocks
1026  *
1027  * When a page fault occurs, filesystems may call this helper in their
1028  * pmd_fault handler for DAX files.
1029  */
1030 int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1031 			pmd_t *pmd, unsigned int flags, get_block_t get_block,
1032 			dax_iodone_t complete_unwritten)
1033 {
1034 	int result;
1035 	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
1036 
1037 	if (flags & FAULT_FLAG_WRITE) {
1038 		sb_start_pagefault(sb);
1039 		file_update_time(vma->vm_file);
1040 	}
1041 	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
1042 				complete_unwritten);
1043 	if (flags & FAULT_FLAG_WRITE)
1044 		sb_end_pagefault(sb);
1045 
1046 	return result;
1047 }
1048 EXPORT_SYMBOL_GPL(dax_pmd_fault);
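/*
 * Usage sketch (hypothetical "examplefs"): a .pmd_fault handler forwarding to
 * dax_pmd_fault().  Passing NULL for @complete_unwritten assumes the
 * filesystem never returns unwritten extents; get_block() must also report
 * mappings of at least PMD_SIZE or the helper falls back to PTEs.
 *
 *	static int examplefs_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags,
 *				examplefs_get_block, NULL);
 *	}
 */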
1049 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1050 
1051 /**
1052  * dax_pfn_mkwrite - handle first write to DAX page
1053  * @vma: The virtual memory area where the fault occurred
1054  * @vmf: The description of the fault
1055  */
1056 int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1057 {
1058 	struct file *file = vma->vm_file;
1059 
1060 	/*
1061 	 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
1062 	 * RADIX_DAX_PTE entry already exists in the radix tree from a
1063 	 * previous call to __dax_fault().  We just want to look up that PTE
1064 	 * entry using vmf->pgoff and make sure the dirty tag is set.  This
1065 	 * saves us from having to make a call to get_block() here to look
1066 	 * up the sector.
1067 	 */
1068 	dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false, true);
1069 	return VM_FAULT_NOPAGE;
1070 }
1071 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
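/*
 * Usage sketch (hypothetical "examplefs", using the wrappers sketched above):
 * tying the DAX fault helpers together in a vm_operations_struct and
 * installing it from ->mmap on DAX inodes.  .page_mkwrite can reuse the
 * .fault wrapper, and .pmd_fault is only useful with
 * CONFIG_TRANSPARENT_HUGEPAGE.
 *
 *	static const struct vm_operations_struct examplefs_dax_vm_ops = {
 *		.fault		= examplefs_dax_fault,
 *		.pmd_fault	= examplefs_dax_pmd_fault,
 *		.page_mkwrite	= examplefs_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 *
 *	static int examplefs_file_mmap(struct file *file,
 *			struct vm_area_struct *vma)
 *	{
 *		if (!IS_DAX(file_inode(file)))
 *			return generic_file_mmap(file, vma);
 *
 *		file_accessed(file);
 *		vma->vm_ops = &examplefs_dax_vm_ops;
 *		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
 *		return 0;
 *	}
 */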
1072 
1073 /**
1074  * dax_zero_page_range - zero a range within a page of a DAX file
1075  * @inode: The file being truncated
1076  * @from: The file offset that is being truncated to
1077  * @length: The number of bytes to zero
1078  * @get_block: The filesystem method used to translate file offsets to blocks
1079  *
1080  * This function can be called by a filesystem when it is zeroing part of a
1081  * page in a DAX file.  This is intended for hole-punch operations.  If
1082  * you are truncating a file, the helper function dax_truncate_page() may be
1083  * more convenient.
1084  *
1085  * We work in terms of PAGE_CACHE_SIZE here for commonality with
1086  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
1087  * took care of disposing of the unnecessary blocks.  Even if the filesystem
1088  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
1089  * since the file might be mmapped.
1090  */
1091 int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
1092 							get_block_t get_block)
1093 {
1094 	struct buffer_head bh;
1095 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
1096 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
1097 	int err;
1098 
1099 	/* Block boundary? Nothing to do */
1100 	if (!length)
1101 		return 0;
1102 	BUG_ON((offset + length) > PAGE_CACHE_SIZE);
1103 
1104 	memset(&bh, 0, sizeof(bh));
1105 	bh.b_bdev = inode->i_sb->s_bdev;
1106 	bh.b_size = PAGE_CACHE_SIZE;
1107 	err = get_block(inode, index, &bh, 0);
1108 	if (err < 0)
1109 		return err;
1110 	if (buffer_written(&bh)) {
1111 		struct block_device *bdev = bh.b_bdev;
1112 		struct blk_dax_ctl dax = {
1113 			.sector = to_sector(&bh, inode),
1114 			.size = PAGE_CACHE_SIZE,
1115 		};
1116 
1117 		if (dax_map_atomic(bdev, &dax) < 0)
1118 			return PTR_ERR(dax.addr);
1119 		clear_pmem(dax.addr + offset, length);
1120 		wmb_pmem();
1121 		dax_unmap_atomic(bdev, &dax);
1122 	}
1123 
1124 	return 0;
1125 }
1126 EXPORT_SYMBOL_GPL(dax_zero_page_range);
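/*
 * Usage sketch (hypothetical "examplefs"): zeroing the partial pages at both
 * ends of a hole-punch range, assuming @start and @end (exclusive) fall in
 * different pages.
 *
 *	static int examplefs_punch_zero_edges(struct inode *inode,
 *			loff_t start, loff_t end)
 *	{
 *		unsigned partial_start = start & (PAGE_CACHE_SIZE - 1);
 *		unsigned partial_end = end & (PAGE_CACHE_SIZE - 1);
 *		int err = 0;
 *
 *		if (partial_start)
 *			err = dax_zero_page_range(inode, start,
 *					PAGE_CACHE_SIZE - partial_start,
 *					examplefs_get_block);
 *		if (!err && partial_end)
 *			err = dax_zero_page_range(inode, end - partial_end,
 *					partial_end, examplefs_get_block);
 *		return err;
 *	}
 */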
1127 
1128 /**
1129  * dax_truncate_page - handle a partial page being truncated in a DAX file
1130  * @inode: The file being truncated
1131  * @from: The file offset that is being truncated to
1132  * @get_block: The filesystem method used to translate file offsets to blocks
1133  *
1134  * Similar to block_truncate_page(), this function can be called by a
1135  * filesystem when it is truncating a DAX file to handle the partial page.
1136  *
1137  * We work in terms of PAGE_CACHE_SIZE here for commonality with
1138  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
1139  * took care of disposing of the unnecessary blocks.  Even if the filesystem
1140  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
1141  * since the file might be mmapped.
1142  */
1143 int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
1144 {
1145 	unsigned length = PAGE_CACHE_ALIGN(from) - from;
1146 	return dax_zero_page_range(inode, from, length, get_block);
1147 }
1148 EXPORT_SYMBOL_GPL(dax_truncate_page);
1149
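/*
 * Usage sketch (hypothetical "examplefs"): zeroing the tail of the last page
 * from a truncate/setattr path before i_size is reduced.
 *
 *	static int examplefs_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		int error = 0;
 *
 *		if (IS_DAX(inode))
 *			error = dax_truncate_page(inode, newsize,
 *					examplefs_get_block);
 *		if (error)
 *			return error;
 *
 *		truncate_setsize(inode, newsize);
 *		return 0;
 *	}
 */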