/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/sizes.h>

/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * so the stack from this point must follow GFP_NOFS semantics for all
 * operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count, sz;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		/* Zero at most SZ_128K at a time so we can reschedule */
		sz = min_t(long, count, SZ_128K);
		clear_pmem(addr, sz);
		size -= sz;
		BUG_ON(sz & 511);
		sector += sz / 512;
		cond_resched();
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);

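/*
 * Illustrative sketch (not part of the original file): a filesystem is
 * expected to call dax_clear_blocks() on freshly allocated blocks so that
 * stale media contents are never exposed to userspace.
 * example_fs_alloc_blocks() is a hypothetical stand-in for the
 * filesystem's own allocator; only the dax_clear_blocks() call is real.
 */
#if 0	/* example only, not built */
static int example_fs_zero_new_extent(struct inode *inode, sector_t block,
				      long size)
{
	int err = example_fs_alloc_blocks(inode, block, size);

	if (err)
		return err;
	return dax_clear_blocks(inode, block, size);
}
#endif
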
static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			/* The current mapping is used up; look up the next */
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				/* Ask the filesystem for a fresh extent */
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				/* Advance within the extent we already have */
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
									end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			retval = -EFAULT;
			break;
		}

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);

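/*
 * Illustrative sketch (not part of the original file): at this kernel
 * revision a filesystem typically wraps dax_do_io() in its ->direct_IO
 * address_space operation.  example_get_block is a hypothetical
 * get_block_t callback supplied by the filesystem; NULL is passed for
 * @end_io when no completion work is needed.
 */
#if 0	/* example only, not built */
static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
				 loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return dax_do_io(iocb, inode, iter, offset, example_get_block,
			 NULL, DIO_LOCKING);
}
#endif
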
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

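/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * serialises faults against truncate with its own lock calls __dax_fault()
 * directly (XFS, for instance, holds its mmap lock across the call).
 * example_lock()/example_unlock() and example_get_block are hypothetical;
 * NULL is passed assuming the filesystem does not use unwritten extents.
 */
#if 0	/* example only, not built */
static int example_filemap_fault(struct vm_area_struct *vma,
				 struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int ret;

	example_lock(inode);
	ret = __dax_fault(vma, vmf, example_get_block, NULL);
	example_unlock(inode);

	return ret;
}
#endif
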
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written (see __dax_fault() for details); pass NULL if the fs does
 *	not support unwritten extents
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);

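/*
 * Illustrative sketch (not part of the original file): for a filesystem
 * whose locking needs are covered by dax_fault() itself, the ->fault
 * handler can be a one-line wrapper.  example_get_block is again a
 * hypothetical filesystem callback, and NULL assumes no unwritten
 * extents.
 */
#if 0	/* example only, not built */
static int example_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, example_get_block, NULL);
}
#endif
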
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

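/*
 * Worked example: with 4K pages and 2M PMDs, PG_PMD_COLOUR is
 * (2M / 4K) - 1 = 511, i.e. the low nine bits of a page offset that must
 * all be zero for the offset to be PMD-aligned.
 */
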
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void __pmem *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* dax pmd mappings are broken wrt gup and fork */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	length = get_block(inode, block, &bh, write);
	if (length)
		return VM_FAULT_SIGBUS;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_read(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		/*
		 * TODO: teach vmf_insert_pfn_pmd() to support
		 * 'pte_special' for pmds
		 */
		if (pfn_valid(pfn))
			goto fallback;

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			clear_pmem(kaddr, PMD_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address
 * @pmd: The PMD entry to fill in
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written (see __dax_fault() for details)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

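/*
 * Illustrative sketch (not part of the original file): the fault helpers
 * above are normally wired together in a filesystem's
 * vm_operations_struct for DAX files.  example_get_block and the
 * example_* wrappers are hypothetical, following the pattern sketched
 * after dax_fault() above.
 */
#if 0	/* example only, not built */
static int example_dax_pmd_fault(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, unsigned int flags)
{
	return dax_pmd_fault(vma, addr, pmd, flags, example_get_block, NULL);
}

static const struct vm_operations_struct example_dax_vm_ops = {
	.fault		= example_dax_fault,
	.pmd_fault	= example_dax_pmd_fault,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};
#endif
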
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being operated on
 * @from: The file offset where the range to be zeroed starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

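/*
 * Illustrative sketch (not part of the original file): a hole-punch path
 * would zero the partial pages at each end of the hole with
 * dax_zero_page_range() and then free the whole blocks in between.
 * example_get_block and example_free_blocks are hypothetical, and the
 * sketch assumes 'start' and 'end' fall in different pages.
 */
#if 0	/* example only, not built */
static int example_punch_hole(struct inode *inode, loff_t start, loff_t end)
{
	int err;

	/* Zero from 'start' up to the next page boundary */
	err = dax_zero_page_range(inode, start,
				  PAGE_CACHE_ALIGN(start) - start,
				  example_get_block);
	if (err)
		return err;
	/* Zero from the last page boundary up to 'end' */
	err = dax_zero_page_range(inode, end & PAGE_CACHE_MASK,
				  end & ~PAGE_CACHE_MASK, example_get_block);
	if (err)
		return err;
	return example_free_blocks(inode, start, end);
}
#endif
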
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
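
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * truncate path calls dax_truncate_page() to zero the tail of the new
 * last page before shrinking i_size.  example_get_block remains a
 * hypothetical filesystem callback; truncate_setsize() is the real
 * generic helper.
 */
#if 0	/* example only, not built */
static int example_setsize(struct inode *inode, loff_t newsize)
{
	int err = dax_truncate_page(inode, newsize, example_get_block);

	if (err)
		return err;
	truncate_setsize(inode, newsize);
	return 0;
}
#endif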