xref: /openbmc/linux/fs/dax.c (revision ff17b8df)
12025cf9eSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2d475c634SMatthew Wilcox /*
3d475c634SMatthew Wilcox  * fs/dax.c - Direct Access filesystem code
4d475c634SMatthew Wilcox  * Copyright (c) 2013-2014 Intel Corporation
5d475c634SMatthew Wilcox  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
6d475c634SMatthew Wilcox  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
7d475c634SMatthew Wilcox  */
8d475c634SMatthew Wilcox 
9d475c634SMatthew Wilcox #include <linux/atomic.h>
10d475c634SMatthew Wilcox #include <linux/blkdev.h>
11d475c634SMatthew Wilcox #include <linux/buffer_head.h>
12d77e92e2SRoss Zwisler #include <linux/dax.h>
13d475c634SMatthew Wilcox #include <linux/fs.h>
14f7ca90b1SMatthew Wilcox #include <linux/highmem.h>
15f7ca90b1SMatthew Wilcox #include <linux/memcontrol.h>
16f7ca90b1SMatthew Wilcox #include <linux/mm.h>
17d475c634SMatthew Wilcox #include <linux/mutex.h>
189973c98eSRoss Zwisler #include <linux/pagevec.h>
19289c6aedSMatthew Wilcox #include <linux/sched.h>
20f361bf4aSIngo Molnar #include <linux/sched/signal.h>
21d475c634SMatthew Wilcox #include <linux/uio.h>
22f7ca90b1SMatthew Wilcox #include <linux/vmstat.h>
2334c0fd54SDan Williams #include <linux/pfn_t.h>
240e749e54SDan Williams #include <linux/sizes.h>
254b4bb46dSJan Kara #include <linux/mmu_notifier.h>
26a254e568SChristoph Hellwig #include <linux/iomap.h>
2706083a09SMuchun Song #include <linux/rmap.h>
2811cf9d86SAneesh Kumar K.V #include <asm/pgalloc.h>
29d475c634SMatthew Wilcox 
30282a8e03SRoss Zwisler #define CREATE_TRACE_POINTS
31282a8e03SRoss Zwisler #include <trace/events/fs_dax.h>
32282a8e03SRoss Zwisler 
33cfc93c6cSMatthew Wilcox static inline unsigned int pe_order(enum page_entry_size pe_size)
34cfc93c6cSMatthew Wilcox {
35cfc93c6cSMatthew Wilcox 	if (pe_size == PE_SIZE_PTE)
36cfc93c6cSMatthew Wilcox 		return PAGE_SHIFT - PAGE_SHIFT;
37cfc93c6cSMatthew Wilcox 	if (pe_size == PE_SIZE_PMD)
38cfc93c6cSMatthew Wilcox 		return PMD_SHIFT - PAGE_SHIFT;
39cfc93c6cSMatthew Wilcox 	if (pe_size == PE_SIZE_PUD)
40cfc93c6cSMatthew Wilcox 		return PUD_SHIFT - PAGE_SHIFT;
41cfc93c6cSMatthew Wilcox 	return ~0;
42cfc93c6cSMatthew Wilcox }
43cfc93c6cSMatthew Wilcox 
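/*
 * Illustrative note (not part of fs/dax.c): pe_order() simply expresses each
 * fault size as a power-of-two number of base pages.  Assuming 4KiB pages
 * (PAGE_SHIFT == 12) with PMD_SHIFT == 21 and PUD_SHIFT == 30, which is
 * architecture dependent, the expected values would be:
 *
 *	pe_order(PE_SIZE_PTE) == 0	(order 0:  one 4KiB page)
 *	pe_order(PE_SIZE_PMD) == 9	(order 9:  512 pages, 2MiB)
 *	pe_order(PE_SIZE_PUD) == 18	(order 18: 262144 pages, 1GiB)
 */
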
44ac401cc7SJan Kara /* We choose 4096 entries - same as per-zone page wait tables */
45ac401cc7SJan Kara #define DAX_WAIT_TABLE_BITS 12
46ac401cc7SJan Kara #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
47ac401cc7SJan Kara 
48917f3452SRoss Zwisler /* The 'colour' (ie low bits) within a PMD of a page offset.  */
49917f3452SRoss Zwisler #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
50977fbdcdSMatthew Wilcox #define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
51917f3452SRoss Zwisler 
52cfc93c6cSMatthew Wilcox /* The order of a PMD entry */
53cfc93c6cSMatthew Wilcox #define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
54cfc93c6cSMatthew Wilcox 
55ce95ab0fSRoss Zwisler static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
56ac401cc7SJan Kara 
57ac401cc7SJan Kara static int __init init_dax_wait_table(void)
58ac401cc7SJan Kara {
59ac401cc7SJan Kara 	int i;
60ac401cc7SJan Kara 
61ac401cc7SJan Kara 	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
62ac401cc7SJan Kara 		init_waitqueue_head(wait_table + i);
63ac401cc7SJan Kara 	return 0;
64ac401cc7SJan Kara }
65ac401cc7SJan Kara fs_initcall(init_dax_wait_table);
66ac401cc7SJan Kara 
67527b19d0SRoss Zwisler /*
683159f943SMatthew Wilcox  * DAX pagecache entries use XArray value entries so they can't be mistaken
693159f943SMatthew Wilcox  * for pages.  We use one bit for locking, one bit for the entry size (PMD)
703159f943SMatthew Wilcox  * and two more to tell us if the entry is a zero page or an empty entry that
713159f943SMatthew Wilcox  * is just used for locking.  In total four special bits.
72527b19d0SRoss Zwisler  *
73527b19d0SRoss Zwisler  * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
74527b19d0SRoss Zwisler  * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
75527b19d0SRoss Zwisler  * block allocation.
76527b19d0SRoss Zwisler  */
773159f943SMatthew Wilcox #define DAX_SHIFT	(4)
783159f943SMatthew Wilcox #define DAX_LOCKED	(1UL << 0)
793159f943SMatthew Wilcox #define DAX_PMD		(1UL << 1)
803159f943SMatthew Wilcox #define DAX_ZERO_PAGE	(1UL << 2)
813159f943SMatthew Wilcox #define DAX_EMPTY	(1UL << 3)
82527b19d0SRoss Zwisler 
83a77d19f4SMatthew Wilcox static unsigned long dax_to_pfn(void *entry)
84527b19d0SRoss Zwisler {
853159f943SMatthew Wilcox 	return xa_to_value(entry) >> DAX_SHIFT;
86527b19d0SRoss Zwisler }
87527b19d0SRoss Zwisler 
889f32d221SMatthew Wilcox static void *dax_make_entry(pfn_t pfn, unsigned long flags)
899f32d221SMatthew Wilcox {
909f32d221SMatthew Wilcox 	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
919f32d221SMatthew Wilcox }
929f32d221SMatthew Wilcox 
93cfc93c6cSMatthew Wilcox static bool dax_is_locked(void *entry)
94cfc93c6cSMatthew Wilcox {
95cfc93c6cSMatthew Wilcox 	return xa_to_value(entry) & DAX_LOCKED;
96cfc93c6cSMatthew Wilcox }
97cfc93c6cSMatthew Wilcox 
98a77d19f4SMatthew Wilcox static unsigned int dax_entry_order(void *entry)
99527b19d0SRoss Zwisler {
1003159f943SMatthew Wilcox 	if (xa_to_value(entry) & DAX_PMD)
101cfc93c6cSMatthew Wilcox 		return PMD_ORDER;
102527b19d0SRoss Zwisler 	return 0;
103527b19d0SRoss Zwisler }
104527b19d0SRoss Zwisler 
105fda490d3SMatthew Wilcox static unsigned long dax_is_pmd_entry(void *entry)
106642261acSRoss Zwisler {
1073159f943SMatthew Wilcox 	return xa_to_value(entry) & DAX_PMD;
108642261acSRoss Zwisler }
109642261acSRoss Zwisler 
110fda490d3SMatthew Wilcox static bool dax_is_pte_entry(void *entry)
111642261acSRoss Zwisler {
1123159f943SMatthew Wilcox 	return !(xa_to_value(entry) & DAX_PMD);
113642261acSRoss Zwisler }
114642261acSRoss Zwisler 
115642261acSRoss Zwisler static int dax_is_zero_entry(void *entry)
116642261acSRoss Zwisler {
1173159f943SMatthew Wilcox 	return xa_to_value(entry) & DAX_ZERO_PAGE;
118642261acSRoss Zwisler }
119642261acSRoss Zwisler 
120642261acSRoss Zwisler static int dax_is_empty_entry(void *entry)
121642261acSRoss Zwisler {
1223159f943SMatthew Wilcox 	return xa_to_value(entry) & DAX_EMPTY;
123642261acSRoss Zwisler }
124642261acSRoss Zwisler 
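/*
 * Illustrative sketch (not part of fs/dax.c, helper name and example pfn are
 * hypothetical): how a pfn and its flags round-trip through dax_make_entry()
 * and the predicates above.
 */
static void __maybe_unused dax_entry_encoding_example(void)
{
	pfn_t pfn = pfn_to_pfn_t(0x1234);
	void *entry = dax_make_entry(pfn, DAX_PMD);

	WARN_ON(dax_to_pfn(entry) != 0x1234);		/* pfn lives above DAX_SHIFT */
	WARN_ON(!dax_is_pmd_entry(entry));		/* DAX_PMD flag survives */
	WARN_ON(dax_is_locked(entry));			/* DAX_LOCKED is not set yet */
	WARN_ON(dax_entry_order(entry) != PMD_ORDER);
}
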
125f7ca90b1SMatthew Wilcox /*
12623c84eb7SMatthew Wilcox (Oracle)  * true if the entry that was found is of a smaller order than the entry
12723c84eb7SMatthew Wilcox (Oracle)  * we were looking for
12823c84eb7SMatthew Wilcox (Oracle)  */
12923c84eb7SMatthew Wilcox (Oracle) static bool dax_is_conflict(void *entry)
13023c84eb7SMatthew Wilcox (Oracle) {
13123c84eb7SMatthew Wilcox (Oracle) 	return entry == XA_RETRY_ENTRY;
13223c84eb7SMatthew Wilcox (Oracle) }
13323c84eb7SMatthew Wilcox (Oracle) 
13423c84eb7SMatthew Wilcox (Oracle) /*
135a77d19f4SMatthew Wilcox  * DAX page cache entry locking
136ac401cc7SJan Kara  */
137ac401cc7SJan Kara struct exceptional_entry_key {
138ec4907ffSMatthew Wilcox 	struct xarray *xa;
13963e95b5cSRoss Zwisler 	pgoff_t entry_start;
140ac401cc7SJan Kara };
141ac401cc7SJan Kara 
142ac401cc7SJan Kara struct wait_exceptional_entry_queue {
143ac6424b9SIngo Molnar 	wait_queue_entry_t wait;
144ac401cc7SJan Kara 	struct exceptional_entry_key key;
145ac401cc7SJan Kara };
146ac401cc7SJan Kara 
147698ab77aSVivek Goyal /**
148698ab77aSVivek Goyal  * enum dax_wake_mode: waitqueue wakeup behaviour
149698ab77aSVivek Goyal  * @WAKE_ALL: wake all waiters in the waitqueue
150698ab77aSVivek Goyal  * @WAKE_NEXT: wake only the first waiter in the waitqueue
151698ab77aSVivek Goyal  */
152698ab77aSVivek Goyal enum dax_wake_mode {
153698ab77aSVivek Goyal 	WAKE_ALL,
154698ab77aSVivek Goyal 	WAKE_NEXT,
155698ab77aSVivek Goyal };
156698ab77aSVivek Goyal 
157b15cd800SMatthew Wilcox static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
158b15cd800SMatthew Wilcox 		void *entry, struct exceptional_entry_key *key)
15963e95b5cSRoss Zwisler {
16063e95b5cSRoss Zwisler 	unsigned long hash;
161b15cd800SMatthew Wilcox 	unsigned long index = xas->xa_index;
16263e95b5cSRoss Zwisler 
16363e95b5cSRoss Zwisler 	/*
16463e95b5cSRoss Zwisler 	 * If 'entry' is a PMD, align the 'index' that we use for the wait
16563e95b5cSRoss Zwisler 	 * queue to the start of that PMD.  This ensures that all offsets in
16663e95b5cSRoss Zwisler 	 * the range covered by the PMD map to the same bit lock.
16763e95b5cSRoss Zwisler 	 */
168642261acSRoss Zwisler 	if (dax_is_pmd_entry(entry))
169917f3452SRoss Zwisler 		index &= ~PG_PMD_COLOUR;
170b15cd800SMatthew Wilcox 	key->xa = xas->xa;
17163e95b5cSRoss Zwisler 	key->entry_start = index;
17263e95b5cSRoss Zwisler 
173b15cd800SMatthew Wilcox 	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
17463e95b5cSRoss Zwisler 	return wait_table + hash;
17563e95b5cSRoss Zwisler }
17663e95b5cSRoss Zwisler 
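/*
 * Illustrative sketch (not part of fs/dax.c, helper name is hypothetical):
 * because the low PG_PMD_COLOUR bits are masked off for PMD entries, two
 * xa_states whose indices fall anywhere inside the same PMD hash to the same
 * waitqueue, so waiters and wakers always meet on one wait_table slot.
 */
static bool __maybe_unused dax_same_waitqueue_example(struct xa_state *a,
		void *entry_a, struct xa_state *b, void *entry_b)
{
	struct exceptional_entry_key ka, kb;

	return dax_entry_waitqueue(a, entry_a, &ka) ==
	       dax_entry_waitqueue(b, entry_b, &kb);
}
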
177ec4907ffSMatthew Wilcox static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
178ec4907ffSMatthew Wilcox 		unsigned int mode, int sync, void *keyp)
179ac401cc7SJan Kara {
180ac401cc7SJan Kara 	struct exceptional_entry_key *key = keyp;
181ac401cc7SJan Kara 	struct wait_exceptional_entry_queue *ewait =
182ac401cc7SJan Kara 		container_of(wait, struct wait_exceptional_entry_queue, wait);
183ac401cc7SJan Kara 
184ec4907ffSMatthew Wilcox 	if (key->xa != ewait->key.xa ||
18563e95b5cSRoss Zwisler 	    key->entry_start != ewait->key.entry_start)
186ac401cc7SJan Kara 		return 0;
187ac401cc7SJan Kara 	return autoremove_wake_function(wait, mode, sync, NULL);
188ac401cc7SJan Kara }
189ac401cc7SJan Kara 
190ac401cc7SJan Kara /*
191b93b0163SMatthew Wilcox  * @entry may no longer be the entry at the index in the mapping.
192b93b0163SMatthew Wilcox  * The important information it's conveying is whether the entry at
193b93b0163SMatthew Wilcox  * this index used to be a PMD entry.
194e30331ffSRoss Zwisler  */
195698ab77aSVivek Goyal static void dax_wake_entry(struct xa_state *xas, void *entry,
196698ab77aSVivek Goyal 			   enum dax_wake_mode mode)
197e30331ffSRoss Zwisler {
198e30331ffSRoss Zwisler 	struct exceptional_entry_key key;
199e30331ffSRoss Zwisler 	wait_queue_head_t *wq;
200e30331ffSRoss Zwisler 
201b15cd800SMatthew Wilcox 	wq = dax_entry_waitqueue(xas, entry, &key);
202e30331ffSRoss Zwisler 
203e30331ffSRoss Zwisler 	/*
204e30331ffSRoss Zwisler 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
205b93b0163SMatthew Wilcox 	 * under the i_pages lock, ditto for entry handling in our callers.
206e30331ffSRoss Zwisler 	 * So at this point all tasks that could have seen our entry locked
207e30331ffSRoss Zwisler 	 * must be in the waitqueue and the following check will see them.
208e30331ffSRoss Zwisler 	 */
209e30331ffSRoss Zwisler 	if (waitqueue_active(wq))
210698ab77aSVivek Goyal 		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
211e30331ffSRoss Zwisler }
212e30331ffSRoss Zwisler 
213cfc93c6cSMatthew Wilcox /*
214cfc93c6cSMatthew Wilcox  * Look up entry in page cache, wait for it to become unlocked if it
215cfc93c6cSMatthew Wilcox  * is a DAX entry and return it.  The caller must subsequently call
216cfc93c6cSMatthew Wilcox  * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
21723c84eb7SMatthew Wilcox (Oracle)  * if it did.  The entry returned may have a larger order than @order.
21823c84eb7SMatthew Wilcox (Oracle)  * If @order is larger than the order of the entry found in i_pages, this
21923c84eb7SMatthew Wilcox (Oracle)  * function returns a dax_is_conflict entry.
220cfc93c6cSMatthew Wilcox  *
221cfc93c6cSMatthew Wilcox  * Must be called with the i_pages lock held.
222cfc93c6cSMatthew Wilcox  */
22323c84eb7SMatthew Wilcox (Oracle) static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
224cfc93c6cSMatthew Wilcox {
225cfc93c6cSMatthew Wilcox 	void *entry;
226cfc93c6cSMatthew Wilcox 	struct wait_exceptional_entry_queue ewait;
227cfc93c6cSMatthew Wilcox 	wait_queue_head_t *wq;
228cfc93c6cSMatthew Wilcox 
229cfc93c6cSMatthew Wilcox 	init_wait(&ewait.wait);
230cfc93c6cSMatthew Wilcox 	ewait.wait.func = wake_exceptional_entry_func;
231cfc93c6cSMatthew Wilcox 
232cfc93c6cSMatthew Wilcox 	for (;;) {
2330e40de03SMatthew Wilcox 		entry = xas_find_conflict(xas);
2346370740eSDan Williams 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
2356370740eSDan Williams 			return entry;
23623c84eb7SMatthew Wilcox (Oracle) 		if (dax_entry_order(entry) < order)
23723c84eb7SMatthew Wilcox (Oracle) 			return XA_RETRY_ENTRY;
2386370740eSDan Williams 		if (!dax_is_locked(entry))
239cfc93c6cSMatthew Wilcox 			return entry;
240cfc93c6cSMatthew Wilcox 
241b15cd800SMatthew Wilcox 		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
242cfc93c6cSMatthew Wilcox 		prepare_to_wait_exclusive(wq, &ewait.wait,
243cfc93c6cSMatthew Wilcox 					  TASK_UNINTERRUPTIBLE);
244cfc93c6cSMatthew Wilcox 		xas_unlock_irq(xas);
245cfc93c6cSMatthew Wilcox 		xas_reset(xas);
246cfc93c6cSMatthew Wilcox 		schedule();
247cfc93c6cSMatthew Wilcox 		finish_wait(wq, &ewait.wait);
248cfc93c6cSMatthew Wilcox 		xas_lock_irq(xas);
249cfc93c6cSMatthew Wilcox 	}
250cfc93c6cSMatthew Wilcox }
251cfc93c6cSMatthew Wilcox 
25255e56f06SMatthew Wilcox /*
25355e56f06SMatthew Wilcox  * The only thing keeping the address space around is the i_pages lock
25455e56f06SMatthew Wilcox  * (it's cycled in clear_inode() after removing the entries from i_pages)
25555e56f06SMatthew Wilcox  * (it's cycled in clear_inode() after removing the entries from i_pages).
25655e56f06SMatthew Wilcox  */
25755e56f06SMatthew Wilcox static void wait_entry_unlocked(struct xa_state *xas, void *entry)
25855e56f06SMatthew Wilcox {
25955e56f06SMatthew Wilcox 	struct wait_exceptional_entry_queue ewait;
26055e56f06SMatthew Wilcox 	wait_queue_head_t *wq;
26155e56f06SMatthew Wilcox 
26255e56f06SMatthew Wilcox 	init_wait(&ewait.wait);
26355e56f06SMatthew Wilcox 	ewait.wait.func = wake_exceptional_entry_func;
26455e56f06SMatthew Wilcox 
26555e56f06SMatthew Wilcox 	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
266d8a70641SDan Williams 	/*
267d8a70641SDan Williams 	 * Unlike get_unlocked_entry() there is no guarantee that this
268d8a70641SDan Williams 	 * path ever successfully retrieves an unlocked entry before an
269d8a70641SDan Williams 	 * inode dies. Perform a non-exclusive wait in case this path
270d8a70641SDan Williams 	 * never successfully performs its own wake up.
271d8a70641SDan Williams 	 */
272d8a70641SDan Williams 	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
27355e56f06SMatthew Wilcox 	xas_unlock_irq(xas);
27455e56f06SMatthew Wilcox 	schedule();
27555e56f06SMatthew Wilcox 	finish_wait(wq, &ewait.wait);
27655e56f06SMatthew Wilcox }
27755e56f06SMatthew Wilcox 
2784c3d043dSVivek Goyal static void put_unlocked_entry(struct xa_state *xas, void *entry,
2794c3d043dSVivek Goyal 			       enum dax_wake_mode mode)
280cfc93c6cSMatthew Wilcox {
28161c30c98SJan Kara 	if (entry && !dax_is_conflict(entry))
2824c3d043dSVivek Goyal 		dax_wake_entry(xas, entry, mode);
283cfc93c6cSMatthew Wilcox }
284cfc93c6cSMatthew Wilcox 
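/*
 * Illustrative sketch (not part of fs/dax.c, helper name is hypothetical):
 * the usual calling pattern around get_unlocked_entry()/put_unlocked_entry(),
 * mirroring what __dax_invalidate_entry() below does.
 */
static void __maybe_unused dax_peek_entry_example(struct address_space *mapping,
		pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	/* ... inspect the (now unlocked) entry under the i_pages lock ... */
	put_unlocked_entry(&xas, entry, WAKE_NEXT);
	xas_unlock_irq(&xas);
}
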
285cfc93c6cSMatthew Wilcox /*
286cfc93c6cSMatthew Wilcox  * We used the xa_state to get the entry, but then we locked the entry and
287cfc93c6cSMatthew Wilcox  * dropped the xa_lock, so we know the xa_state is stale and must be reset
288cfc93c6cSMatthew Wilcox  * before use.
289cfc93c6cSMatthew Wilcox  */
290cfc93c6cSMatthew Wilcox static void dax_unlock_entry(struct xa_state *xas, void *entry)
291cfc93c6cSMatthew Wilcox {
292cfc93c6cSMatthew Wilcox 	void *old;
293cfc93c6cSMatthew Wilcox 
2947ae2ea7dSMatthew Wilcox 	BUG_ON(dax_is_locked(entry));
295cfc93c6cSMatthew Wilcox 	xas_reset(xas);
296cfc93c6cSMatthew Wilcox 	xas_lock_irq(xas);
297cfc93c6cSMatthew Wilcox 	old = xas_store(xas, entry);
298cfc93c6cSMatthew Wilcox 	xas_unlock_irq(xas);
299cfc93c6cSMatthew Wilcox 	BUG_ON(!dax_is_locked(old));
300698ab77aSVivek Goyal 	dax_wake_entry(xas, entry, WAKE_NEXT);
301cfc93c6cSMatthew Wilcox }
302cfc93c6cSMatthew Wilcox 
303cfc93c6cSMatthew Wilcox /*
304cfc93c6cSMatthew Wilcox  * Return: The entry stored at this location before it was locked.
305cfc93c6cSMatthew Wilcox  */
306cfc93c6cSMatthew Wilcox static void *dax_lock_entry(struct xa_state *xas, void *entry)
307cfc93c6cSMatthew Wilcox {
308cfc93c6cSMatthew Wilcox 	unsigned long v = xa_to_value(entry);
309cfc93c6cSMatthew Wilcox 	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
310cfc93c6cSMatthew Wilcox }
311cfc93c6cSMatthew Wilcox 
312d2c997c0SDan Williams static unsigned long dax_entry_size(void *entry)
313d2c997c0SDan Williams {
314d2c997c0SDan Williams 	if (dax_is_zero_entry(entry))
315d2c997c0SDan Williams 		return 0;
316d2c997c0SDan Williams 	else if (dax_is_empty_entry(entry))
317d2c997c0SDan Williams 		return 0;
318d2c997c0SDan Williams 	else if (dax_is_pmd_entry(entry))
319d2c997c0SDan Williams 		return PMD_SIZE;
320d2c997c0SDan Williams 	else
321d2c997c0SDan Williams 		return PAGE_SIZE;
322d2c997c0SDan Williams }
323d2c997c0SDan Williams 
324a77d19f4SMatthew Wilcox static unsigned long dax_end_pfn(void *entry)
325d2c997c0SDan Williams {
326a77d19f4SMatthew Wilcox 	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
327d2c997c0SDan Williams }
328d2c997c0SDan Williams 
329d2c997c0SDan Williams /*
330d2c997c0SDan Williams  * Iterate through all mapped pfns represented by an entry, i.e. skip
331d2c997c0SDan Williams  * 'empty' and 'zero' entries.
332d2c997c0SDan Williams  */
333d2c997c0SDan Williams #define for_each_mapped_pfn(entry, pfn) \
334a77d19f4SMatthew Wilcox 	for (pfn = dax_to_pfn(entry); \
335a77d19f4SMatthew Wilcox 			pfn < dax_end_pfn(entry); pfn++)
336d2c997c0SDan Williams 
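/*
 * Illustrative sketch (not part of fs/dax.c, helper name is hypothetical):
 * counting the pages that back a DAX entry with for_each_mapped_pfn().  Zero
 * and empty entries contribute no iterations because dax_entry_size()
 * returns 0 for them.
 */
static unsigned long __maybe_unused dax_entry_nr_pages_example(void *entry)
{
	unsigned long pfn, nr = 0;

	for_each_mapped_pfn(entry, pfn)
		nr++;
	/* 0 for zero/empty entries, 1 for a PTE entry, PG_PMD_NR for a PMD entry */
	return nr;
}
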
3376061b69bSShiyang Ruan static inline bool dax_mapping_is_cow(struct address_space *mapping)
3386061b69bSShiyang Ruan {
3396061b69bSShiyang Ruan 	return (unsigned long)mapping == PAGE_MAPPING_DAX_COW;
3406061b69bSShiyang Ruan }
3416061b69bSShiyang Ruan 
34273449dafSDan Williams /*
3436061b69bSShiyang Ruan  * Set page->mapping with the PAGE_MAPPING_DAX_COW flag and increase the refcount.
3446061b69bSShiyang Ruan  */
3456061b69bSShiyang Ruan static inline void dax_mapping_set_cow(struct page *page)
3466061b69bSShiyang Ruan {
3476061b69bSShiyang Ruan 	if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) {
3486061b69bSShiyang Ruan 		/*
3496061b69bSShiyang Ruan 		 * Reset the index if the page was already mapped
3506061b69bSShiyang Ruan 		 * regularly before.
3516061b69bSShiyang Ruan 		 */
3526061b69bSShiyang Ruan 		if (page->mapping)
3536061b69bSShiyang Ruan 			page->index = 1;
3546061b69bSShiyang Ruan 		page->mapping = (void *)PAGE_MAPPING_DAX_COW;
3556061b69bSShiyang Ruan 	}
3566061b69bSShiyang Ruan 	page->index++;
3576061b69bSShiyang Ruan }
3586061b69bSShiyang Ruan 
3596061b69bSShiyang Ruan /*
3606061b69bSShiyang Ruan  * When called from dax_insert_entry(), the cow flag indicates whether this
3616061b69bSShiyang Ruan  * entry is shared by multiple files.  If so, set page->mapping to
3626061b69bSShiyang Ruan  * PAGE_MAPPING_DAX_COW and use page->index as the refcount.
36373449dafSDan Williams  */
36473449dafSDan Williams static void dax_associate_entry(void *entry, struct address_space *mapping,
3656061b69bSShiyang Ruan 		struct vm_area_struct *vma, unsigned long address, bool cow)
366d2c997c0SDan Williams {
36773449dafSDan Williams 	unsigned long size = dax_entry_size(entry), pfn, index;
36873449dafSDan Williams 	int i = 0;
369d2c997c0SDan Williams 
370d2c997c0SDan Williams 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
371d2c997c0SDan Williams 		return;
372d2c997c0SDan Williams 
37373449dafSDan Williams 	index = linear_page_index(vma, address & ~(size - 1));
374d2c997c0SDan Williams 	for_each_mapped_pfn(entry, pfn) {
375d2c997c0SDan Williams 		struct page *page = pfn_to_page(pfn);
376d2c997c0SDan Williams 
3776061b69bSShiyang Ruan 		if (cow) {
3786061b69bSShiyang Ruan 			dax_mapping_set_cow(page);
3796061b69bSShiyang Ruan 		} else {
380d2c997c0SDan Williams 			WARN_ON_ONCE(page->mapping);
381d2c997c0SDan Williams 			page->mapping = mapping;
38273449dafSDan Williams 			page->index = index + i++;
383d2c997c0SDan Williams 		}
384d2c997c0SDan Williams 	}
3856061b69bSShiyang Ruan }
386d2c997c0SDan Williams 
387d2c997c0SDan Williams static void dax_disassociate_entry(void *entry, struct address_space *mapping,
388d2c997c0SDan Williams 		bool trunc)
389d2c997c0SDan Williams {
390d2c997c0SDan Williams 	unsigned long pfn;
391d2c997c0SDan Williams 
392d2c997c0SDan Williams 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
393d2c997c0SDan Williams 		return;
394d2c997c0SDan Williams 
395d2c997c0SDan Williams 	for_each_mapped_pfn(entry, pfn) {
396d2c997c0SDan Williams 		struct page *page = pfn_to_page(pfn);
397d2c997c0SDan Williams 
398d2c997c0SDan Williams 		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
3996061b69bSShiyang Ruan 		if (dax_mapping_is_cow(page->mapping)) {
4006061b69bSShiyang Ruan 			/* keep the CoW flag if this page is still shared */
4016061b69bSShiyang Ruan 			if (page->index-- > 0)
4026061b69bSShiyang Ruan 				continue;
4036061b69bSShiyang Ruan 		} else
404d2c997c0SDan Williams 			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
405d2c997c0SDan Williams 		page->mapping = NULL;
40673449dafSDan Williams 		page->index = 0;
407d2c997c0SDan Williams 	}
408d2c997c0SDan Williams }
409d2c997c0SDan Williams 
4105fac7408SDan Williams static struct page *dax_busy_page(void *entry)
4115fac7408SDan Williams {
4125fac7408SDan Williams 	unsigned long pfn;
4135fac7408SDan Williams 
4145fac7408SDan Williams 	for_each_mapped_pfn(entry, pfn) {
4155fac7408SDan Williams 		struct page *page = pfn_to_page(pfn);
4165fac7408SDan Williams 
4175fac7408SDan Williams 		if (page_ref_count(page) > 1)
4185fac7408SDan Williams 			return page;
4195fac7408SDan Williams 	}
4205fac7408SDan Williams 	return NULL;
4215fac7408SDan Williams }
4225fac7408SDan Williams 
423c5bbd451SMatthew Wilcox /*
424c2e8021aSShiyang Ruan  * dax_lock_page - Lock the DAX entry corresponding to a page
425c5bbd451SMatthew Wilcox  * @page: The page whose entry we want to lock
426c5bbd451SMatthew Wilcox  *
427c5bbd451SMatthew Wilcox  * Context: Process context.
42827359fd6SMatthew Wilcox  * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
42927359fd6SMatthew Wilcox  * not be locked.
430c5bbd451SMatthew Wilcox  */
43127359fd6SMatthew Wilcox dax_entry_t dax_lock_page(struct page *page)
432c2a7d2a1SDan Williams {
4339f32d221SMatthew Wilcox 	XA_STATE(xas, NULL, 0);
4349f32d221SMatthew Wilcox 	void *entry;
435c2a7d2a1SDan Williams 
436c5bbd451SMatthew Wilcox 	/* Ensure page->mapping isn't freed while we look at it */
437c5bbd451SMatthew Wilcox 	rcu_read_lock();
438c2a7d2a1SDan Williams 	for (;;) {
4399f32d221SMatthew Wilcox 		struct address_space *mapping = READ_ONCE(page->mapping);
440c2a7d2a1SDan Williams 
44127359fd6SMatthew Wilcox 		entry = NULL;
442c93db7bbSMatthew Wilcox 		if (!mapping || !dax_mapping(mapping))
443c5bbd451SMatthew Wilcox 			break;
444c2a7d2a1SDan Williams 
445c2a7d2a1SDan Williams 		/*
446c2a7d2a1SDan Williams 		 * In the device-dax case there's no need to lock, a
447c2a7d2a1SDan Williams 		 * struct dev_pagemap pin is sufficient to keep the
448c2a7d2a1SDan Williams 		 * inode alive, and we assume we have dev_pagemap pin
449c2a7d2a1SDan Williams 		 * otherwise we would not have a valid pfn_to_page()
450c2a7d2a1SDan Williams 		 * translation.
451c2a7d2a1SDan Williams 		 */
45227359fd6SMatthew Wilcox 		entry = (void *)~0UL;
4539f32d221SMatthew Wilcox 		if (S_ISCHR(mapping->host->i_mode))
454c5bbd451SMatthew Wilcox 			break;
455c2a7d2a1SDan Williams 
4569f32d221SMatthew Wilcox 		xas.xa = &mapping->i_pages;
4579f32d221SMatthew Wilcox 		xas_lock_irq(&xas);
458c2a7d2a1SDan Williams 		if (mapping != page->mapping) {
4599f32d221SMatthew Wilcox 			xas_unlock_irq(&xas);
460c2a7d2a1SDan Williams 			continue;
461c2a7d2a1SDan Williams 		}
4629f32d221SMatthew Wilcox 		xas_set(&xas, page->index);
4639f32d221SMatthew Wilcox 		entry = xas_load(&xas);
4649f32d221SMatthew Wilcox 		if (dax_is_locked(entry)) {
465c5bbd451SMatthew Wilcox 			rcu_read_unlock();
46655e56f06SMatthew Wilcox 			wait_entry_unlocked(&xas, entry);
467c5bbd451SMatthew Wilcox 			rcu_read_lock();
468c2a7d2a1SDan Williams 			continue;
469c2a7d2a1SDan Williams 		}
4709f32d221SMatthew Wilcox 		dax_lock_entry(&xas, entry);
4719f32d221SMatthew Wilcox 		xas_unlock_irq(&xas);
472c5bbd451SMatthew Wilcox 		break;
4739f32d221SMatthew Wilcox 	}
474c5bbd451SMatthew Wilcox 	rcu_read_unlock();
47527359fd6SMatthew Wilcox 	return (dax_entry_t)entry;
476c2a7d2a1SDan Williams }
477c2a7d2a1SDan Williams 
47827359fd6SMatthew Wilcox void dax_unlock_page(struct page *page, dax_entry_t cookie)
479c2a7d2a1SDan Williams {
480c2a7d2a1SDan Williams 	struct address_space *mapping = page->mapping;
4819f32d221SMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, page->index);
482c2a7d2a1SDan Williams 
4839f32d221SMatthew Wilcox 	if (S_ISCHR(mapping->host->i_mode))
484c2a7d2a1SDan Williams 		return;
485c2a7d2a1SDan Williams 
48627359fd6SMatthew Wilcox 	dax_unlock_entry(&xas, (void *)cookie);
487c2a7d2a1SDan Williams }
488c2a7d2a1SDan Williams 
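/*
 * Illustrative sketch (not part of fs/dax.c, helper name is hypothetical):
 * the expected pairing of dax_lock_page()/dax_unlock_page().  The in-tree
 * caller is the memory failure handling path; this is a simplified outline.
 */
static void __maybe_unused dax_lock_page_example(struct page *page)
{
	dax_entry_t cookie = dax_lock_page(page);

	if (!cookie)
		return;		/* the entry could not be locked */
	/* ... operate on @page while its DAX entry stays locked ... */
	dax_unlock_page(page, cookie);
}
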
489ac401cc7SJan Kara /*
4902f437effSShiyang Ruan  * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
4912f437effSShiyang Ruan  * @mapping: the file's mapping whose entry we want to lock
4922f437effSShiyang Ruan  * @index: the offset within this file
4932f437effSShiyang Ruan  * @page: output the dax page corresponding to this dax entry
4942f437effSShiyang Ruan  *
4952f437effSShiyang Ruan  * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
4962f437effSShiyang Ruan  * could not be locked.
4972f437effSShiyang Ruan  */
4982f437effSShiyang Ruan dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
4992f437effSShiyang Ruan 		struct page **page)
5002f437effSShiyang Ruan {
5012f437effSShiyang Ruan 	XA_STATE(xas, NULL, 0);
5022f437effSShiyang Ruan 	void *entry;
5032f437effSShiyang Ruan 
5042f437effSShiyang Ruan 	rcu_read_lock();
5052f437effSShiyang Ruan 	for (;;) {
5062f437effSShiyang Ruan 		entry = NULL;
5072f437effSShiyang Ruan 		if (!dax_mapping(mapping))
5082f437effSShiyang Ruan 			break;
5092f437effSShiyang Ruan 
5102f437effSShiyang Ruan 		xas.xa = &mapping->i_pages;
5112f437effSShiyang Ruan 		xas_lock_irq(&xas);
5122f437effSShiyang Ruan 		xas_set(&xas, index);
5132f437effSShiyang Ruan 		entry = xas_load(&xas);
5142f437effSShiyang Ruan 		if (dax_is_locked(entry)) {
5152f437effSShiyang Ruan 			rcu_read_unlock();
5162f437effSShiyang Ruan 			wait_entry_unlocked(&xas, entry);
5172f437effSShiyang Ruan 			rcu_read_lock();
5182f437effSShiyang Ruan 			continue;
5192f437effSShiyang Ruan 		}
5202f437effSShiyang Ruan 		if (!entry ||
5212f437effSShiyang Ruan 		    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
5222f437effSShiyang Ruan 			/*
5232f437effSShiyang Ruan 			 * Because we look the entry up by the file's mapping
5242f437effSShiyang Ruan 			 * and index, it may not have been inserted yet, or it
5252f437effSShiyang Ruan 			 * may be a zero/empty entry.  Neither case is an
5262f437effSShiyang Ruan 			 * error, so return a special value and do not
5272f437effSShiyang Ruan 			 * output @page.
5282f437effSShiyang Ruan 			 */
5292f437effSShiyang Ruan 			entry = (void *)~0UL;
5302f437effSShiyang Ruan 		} else {
5312f437effSShiyang Ruan 			*page = pfn_to_page(dax_to_pfn(entry));
5322f437effSShiyang Ruan 			dax_lock_entry(&xas, entry);
5332f437effSShiyang Ruan 		}
5342f437effSShiyang Ruan 		xas_unlock_irq(&xas);
5352f437effSShiyang Ruan 		break;
5362f437effSShiyang Ruan 	}
5372f437effSShiyang Ruan 	rcu_read_unlock();
5382f437effSShiyang Ruan 	return (dax_entry_t)entry;
5392f437effSShiyang Ruan }
5402f437effSShiyang Ruan 
5412f437effSShiyang Ruan void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
5422f437effSShiyang Ruan 		dax_entry_t cookie)
5432f437effSShiyang Ruan {
5442f437effSShiyang Ruan 	XA_STATE(xas, &mapping->i_pages, index);
5452f437effSShiyang Ruan 
5462f437effSShiyang Ruan 	if (cookie == ~0UL)
5472f437effSShiyang Ruan 		return;
5482f437effSShiyang Ruan 
5492f437effSShiyang Ruan 	dax_unlock_entry(&xas, (void *)cookie);
5502f437effSShiyang Ruan }
5512f437effSShiyang Ruan 
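/*
 * Illustrative sketch (not part of fs/dax.c, helper name is hypothetical):
 * a caller of dax_lock_mapping_entry().  A cookie of ~0UL means no real
 * entry was found (or it was a zero/empty entry) and *page was not written;
 * handing that cookie back to dax_unlock_mapping_entry() is still safe, it
 * simply returns.
 */
static void __maybe_unused dax_lock_mapping_entry_example(
		struct address_space *mapping, pgoff_t index)
{
	struct page *page = NULL;
	dax_entry_t cookie = dax_lock_mapping_entry(mapping, index, &page);

	if (!cookie)
		return;			/* not a DAX mapping */
	if (page) {
		/* ... use the page backing the locked entry ... */
	}
	dax_unlock_mapping_entry(mapping, index, cookie);
}
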
5522f437effSShiyang Ruan /*
553a77d19f4SMatthew Wilcox  * Find page cache entry at given index. If it is a DAX entry, return it
554a77d19f4SMatthew Wilcox  * with the entry locked. If the page cache doesn't contain an entry at
555a77d19f4SMatthew Wilcox  * that index, add a locked empty entry.
556ac401cc7SJan Kara  *
5573159f943SMatthew Wilcox  * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
558b15cd800SMatthew Wilcox  * either return that locked entry or will return VM_FAULT_FALLBACK.
559b15cd800SMatthew Wilcox  * This will happen if there are any PTE entries within the PMD range
560b15cd800SMatthew Wilcox  * that we are requesting.
561642261acSRoss Zwisler  *
562b15cd800SMatthew Wilcox  * We always favor PTE entries over PMD entries. There isn't a flow where we
563b15cd800SMatthew Wilcox  * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
564b15cd800SMatthew Wilcox  * insertion will fail if it finds any PTE entries already in the tree, and a
565b15cd800SMatthew Wilcox  * PTE insertion will cause an existing PMD entry to be unmapped and
566b15cd800SMatthew Wilcox  * downgraded to PTE entries.  This happens for both PMD zero pages as
567b15cd800SMatthew Wilcox  * well as PMD empty entries.
568642261acSRoss Zwisler  *
569b15cd800SMatthew Wilcox  * The exception to this downgrade path is for PMD entries that have
570b15cd800SMatthew Wilcox  * real storage backing them.  We will leave these real PMD entries in
571b15cd800SMatthew Wilcox  * the tree, and PTE writes will simply dirty the entire PMD entry.
572642261acSRoss Zwisler  *
573ac401cc7SJan Kara  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
574ac401cc7SJan Kara  * persistent memory the benefit is doubtful. We can add that later if we can
575ac401cc7SJan Kara  * show it helps.
576b15cd800SMatthew Wilcox  *
577b15cd800SMatthew Wilcox  * On error, this function does not return an ERR_PTR.  Instead it returns
578b15cd800SMatthew Wilcox  * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
579b15cd800SMatthew Wilcox  * overlap with xarray value entries.
580ac401cc7SJan Kara  */
581b15cd800SMatthew Wilcox static void *grab_mapping_entry(struct xa_state *xas,
58223c84eb7SMatthew Wilcox (Oracle) 		struct address_space *mapping, unsigned int order)
583ac401cc7SJan Kara {
584b15cd800SMatthew Wilcox 	unsigned long index = xas->xa_index;
5851a14e377SJan Kara 	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
586b15cd800SMatthew Wilcox 	void *entry;
587ac401cc7SJan Kara 
588b15cd800SMatthew Wilcox retry:
5891a14e377SJan Kara 	pmd_downgrade = false;
590b15cd800SMatthew Wilcox 	xas_lock_irq(xas);
59123c84eb7SMatthew Wilcox (Oracle) 	entry = get_unlocked_entry(xas, order);
592642261acSRoss Zwisler 
593b15cd800SMatthew Wilcox 	if (entry) {
59423c84eb7SMatthew Wilcox (Oracle) 		if (dax_is_conflict(entry))
59523c84eb7SMatthew Wilcox (Oracle) 			goto fallback;
5960e40de03SMatthew Wilcox 		if (!xa_is_value(entry)) {
59749688e65SHao Li 			xas_set_err(xas, -EIO);
59891d25ba8SRoss Zwisler 			goto out_unlock;
59991d25ba8SRoss Zwisler 		}
60091d25ba8SRoss Zwisler 
60123c84eb7SMatthew Wilcox (Oracle) 		if (order == 0) {
60291d25ba8SRoss Zwisler 			if (dax_is_pmd_entry(entry) &&
603642261acSRoss Zwisler 			    (dax_is_zero_entry(entry) ||
604642261acSRoss Zwisler 			     dax_is_empty_entry(entry))) {
605642261acSRoss Zwisler 				pmd_downgrade = true;
606642261acSRoss Zwisler 			}
607642261acSRoss Zwisler 		}
608642261acSRoss Zwisler 	}
609642261acSRoss Zwisler 
610642261acSRoss Zwisler 	if (pmd_downgrade) {
611642261acSRoss Zwisler 		/*
612642261acSRoss Zwisler 		 * Make sure 'entry' remains valid while we drop
613b93b0163SMatthew Wilcox 		 * the i_pages lock.
614642261acSRoss Zwisler 		 */
615b15cd800SMatthew Wilcox 		dax_lock_entry(xas, entry);
616642261acSRoss Zwisler 
617642261acSRoss Zwisler 		/*
618642261acSRoss Zwisler 		 * Besides huge zero pages the only other thing that gets
619642261acSRoss Zwisler 		 * downgraded are empty entries which don't need to be
620642261acSRoss Zwisler 		 * unmapped.
621642261acSRoss Zwisler 		 */
622b15cd800SMatthew Wilcox 		if (dax_is_zero_entry(entry)) {
623b15cd800SMatthew Wilcox 			xas_unlock_irq(xas);
624b15cd800SMatthew Wilcox 			unmap_mapping_pages(mapping,
625b15cd800SMatthew Wilcox 					xas->xa_index & ~PG_PMD_COLOUR,
626977fbdcdSMatthew Wilcox 					PG_PMD_NR, false);
627b15cd800SMatthew Wilcox 			xas_reset(xas);
628b15cd800SMatthew Wilcox 			xas_lock_irq(xas);
629e11f8b7bSRoss Zwisler 		}
630e11f8b7bSRoss Zwisler 
631d2c997c0SDan Williams 		dax_disassociate_entry(entry, mapping, false);
632b15cd800SMatthew Wilcox 		xas_store(xas, NULL);	/* undo the PMD join */
633698ab77aSVivek Goyal 		dax_wake_entry(xas, entry, WAKE_ALL);
6347f0e07fbSMatthew Wilcox (Oracle) 		mapping->nrpages -= PG_PMD_NR;
635b15cd800SMatthew Wilcox 		entry = NULL;
636b15cd800SMatthew Wilcox 		xas_set(xas, index);
637642261acSRoss Zwisler 	}
638642261acSRoss Zwisler 
639b15cd800SMatthew Wilcox 	if (entry) {
640b15cd800SMatthew Wilcox 		dax_lock_entry(xas, entry);
641b15cd800SMatthew Wilcox 	} else {
64223c84eb7SMatthew Wilcox (Oracle) 		unsigned long flags = DAX_EMPTY;
64323c84eb7SMatthew Wilcox (Oracle) 
64423c84eb7SMatthew Wilcox (Oracle) 		if (order > 0)
64523c84eb7SMatthew Wilcox (Oracle) 			flags |= DAX_PMD;
64623c84eb7SMatthew Wilcox (Oracle) 		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
647b15cd800SMatthew Wilcox 		dax_lock_entry(xas, entry);
648b15cd800SMatthew Wilcox 		if (xas_error(xas))
649b15cd800SMatthew Wilcox 			goto out_unlock;
6507f0e07fbSMatthew Wilcox (Oracle) 		mapping->nrpages += 1UL << order;
651ac401cc7SJan Kara 	}
652b15cd800SMatthew Wilcox 
653642261acSRoss Zwisler out_unlock:
654b15cd800SMatthew Wilcox 	xas_unlock_irq(xas);
655b15cd800SMatthew Wilcox 	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
656b15cd800SMatthew Wilcox 		goto retry;
657b15cd800SMatthew Wilcox 	if (xas->xa_node == XA_ERROR(-ENOMEM))
658b15cd800SMatthew Wilcox 		return xa_mk_internal(VM_FAULT_OOM);
659b15cd800SMatthew Wilcox 	if (xas_error(xas))
660b15cd800SMatthew Wilcox 		return xa_mk_internal(VM_FAULT_SIGBUS);
661e3ad61c6SRoss Zwisler 	return entry;
662b15cd800SMatthew Wilcox fallback:
663b15cd800SMatthew Wilcox 	xas_unlock_irq(xas);
664b15cd800SMatthew Wilcox 	return xa_mk_internal(VM_FAULT_FALLBACK);
665ac401cc7SJan Kara }
666ac401cc7SJan Kara 
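/*
 * Illustrative sketch (not part of fs/dax.c, helper name is hypothetical):
 * decoding grab_mapping_entry()'s return value the way the DAX fault
 * handlers do.  On failure it returns a VM_FAULT code encoded as an xarray
 * internal entry rather than an ERR_PTR.
 */
static vm_fault_t __maybe_unused dax_grab_entry_example(struct xa_state *xas,
		struct address_space *mapping)
{
	void *entry = grab_mapping_entry(xas, mapping, 0);

	if (xa_is_internal(entry))
		return xa_to_internal(entry);	/* e.g. VM_FAULT_FALLBACK or VM_FAULT_OOM */
	/* ... handle the fault with the locked entry ... */
	dax_unlock_entry(xas, entry);
	return 0;
}
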
6675fac7408SDan Williams /**
6686bbdd563SVivek Goyal  * dax_layout_busy_page_range - find first pinned page in @mapping
6695fac7408SDan Williams  * @mapping: address space to scan for a page with ref count > 1
6706bbdd563SVivek Goyal  * @start: Starting offset. Page containing 'start' is included.
6716bbdd563SVivek Goyal  * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
6726bbdd563SVivek Goyal  *       pages from 'start' till the end of file are included.
6735fac7408SDan Williams  *
6745fac7408SDan Williams  * DAX requires ZONE_DEVICE mapped pages. These pages are never
6755fac7408SDan Williams  * 'onlined' to the page allocator so they are considered idle when
6765fac7408SDan Williams  * page->count == 1. A filesystem uses this interface to determine if
6775fac7408SDan Williams  * any page in the mapping is busy, i.e. for DMA, or other
6785fac7408SDan Williams  * get_user_pages() usages.
6795fac7408SDan Williams  *
6805fac7408SDan Williams  * It is expected that the filesystem is holding locks to block the
6815fac7408SDan Williams  * establishment of new mappings in this address_space. I.e. it expects
6825fac7408SDan Williams  * to be able to run unmap_mapping_range() and subsequently not race
6835fac7408SDan Williams  * mapping_mapped() becoming true.
6845fac7408SDan Williams  */
6856bbdd563SVivek Goyal struct page *dax_layout_busy_page_range(struct address_space *mapping,
6866bbdd563SVivek Goyal 					loff_t start, loff_t end)
6875fac7408SDan Williams {
688084a8990SMatthew Wilcox 	void *entry;
689084a8990SMatthew Wilcox 	unsigned int scanned = 0;
6905fac7408SDan Williams 	struct page *page = NULL;
6916bbdd563SVivek Goyal 	pgoff_t start_idx = start >> PAGE_SHIFT;
6926bbdd563SVivek Goyal 	pgoff_t end_idx;
6936bbdd563SVivek Goyal 	XA_STATE(xas, &mapping->i_pages, start_idx);
6945fac7408SDan Williams 
6955fac7408SDan Williams 	/*
6965fac7408SDan Williams 	 * In the 'limited' case get_user_pages() for dax is disabled.
6975fac7408SDan Williams 	 */
6985fac7408SDan Williams 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
6995fac7408SDan Williams 		return NULL;
7005fac7408SDan Williams 
7015fac7408SDan Williams 	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
7025fac7408SDan Williams 		return NULL;
7035fac7408SDan Williams 
7046bbdd563SVivek Goyal 	/* If end == LLONG_MAX, include all pages from start till the end of file */
7056bbdd563SVivek Goyal 	if (end == LLONG_MAX)
7066bbdd563SVivek Goyal 		end_idx = ULONG_MAX;
7076bbdd563SVivek Goyal 	else
7086bbdd563SVivek Goyal 		end_idx = end >> PAGE_SHIFT;
7095fac7408SDan Williams 	/*
7105fac7408SDan Williams 	 * If we race get_user_pages_fast() here either we'll see the
711084a8990SMatthew Wilcox 	 * elevated page count in the iteration and wait, or
7125fac7408SDan Williams 	 * get_user_pages_fast() will see that the page it took a reference
7135fac7408SDan Williams 	 * against is no longer mapped in the page tables and bail to the
7145fac7408SDan Williams 	 * get_user_pages() slow path.  The slow path is protected by
7155fac7408SDan Williams 	 * pte_lock() and pmd_lock(). New references are not taken without
7166bbdd563SVivek Goyal 	 * holding those locks, and unmap_mapping_pages() will not zero the
7175fac7408SDan Williams 	 * pte or pmd without holding the respective lock, so we are
7185fac7408SDan Williams 	 * guaranteed to either see new references or prevent new
7195fac7408SDan Williams 	 * references from being established.
7205fac7408SDan Williams 	 */
7216bbdd563SVivek Goyal 	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);
7225fac7408SDan Williams 
723084a8990SMatthew Wilcox 	xas_lock_irq(&xas);
7246bbdd563SVivek Goyal 	xas_for_each(&xas, entry, end_idx) {
725084a8990SMatthew Wilcox 		if (WARN_ON_ONCE(!xa_is_value(entry)))
7265fac7408SDan Williams 			continue;
727084a8990SMatthew Wilcox 		if (unlikely(dax_is_locked(entry)))
72823c84eb7SMatthew Wilcox (Oracle) 			entry = get_unlocked_entry(&xas, 0);
7295fac7408SDan Williams 		if (entry)
7305fac7408SDan Williams 			page = dax_busy_page(entry);
7314c3d043dSVivek Goyal 		put_unlocked_entry(&xas, entry, WAKE_NEXT);
7325fac7408SDan Williams 		if (page)
7335fac7408SDan Williams 			break;
734084a8990SMatthew Wilcox 		if (++scanned % XA_CHECK_SCHED)
735084a8990SMatthew Wilcox 			continue;
736cdbf8897SRoss Zwisler 
737084a8990SMatthew Wilcox 		xas_pause(&xas);
738084a8990SMatthew Wilcox 		xas_unlock_irq(&xas);
739084a8990SMatthew Wilcox 		cond_resched();
740084a8990SMatthew Wilcox 		xas_lock_irq(&xas);
7415fac7408SDan Williams 	}
742084a8990SMatthew Wilcox 	xas_unlock_irq(&xas);
7435fac7408SDan Williams 	return page;
7445fac7408SDan Williams }
7456bbdd563SVivek Goyal EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
7466bbdd563SVivek Goyal 
7476bbdd563SVivek Goyal struct page *dax_layout_busy_page(struct address_space *mapping)
7486bbdd563SVivek Goyal {
7496bbdd563SVivek Goyal 	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
7506bbdd563SVivek Goyal }
7515fac7408SDan Williams EXPORT_SYMBOL_GPL(dax_layout_busy_page);
7525fac7408SDan Williams 
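/*
 * Illustrative sketch (not part of fs/dax.c, helper name is hypothetical):
 * how a filesystem typically uses dax_layout_busy_page() before truncating.
 * The real break-layouts paths sleep until the elevated reference is dropped
 * and then retry.
 */
static bool __maybe_unused dax_mapping_dma_idle_example(struct address_space *mapping)
{
	struct page *page = dax_layout_busy_page(mapping);

	if (!page)
		return true;	/* no page with an elevated reference count */
	/*
	 * A page is still pinned (e.g. by an in-flight get_user_pages()
	 * user); the caller would wait for page_ref_count(page) to drop back
	 * to 1 and then call dax_layout_busy_page() again.
	 */
	return false;
}
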
753a77d19f4SMatthew Wilcox static int __dax_invalidate_entry(struct address_space *mapping,
754c6dcf52cSJan Kara 					  pgoff_t index, bool trunc)
755c6dcf52cSJan Kara {
75607f2d89cSMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, index);
757c6dcf52cSJan Kara 	int ret = 0;
758c6dcf52cSJan Kara 	void *entry;
759c6dcf52cSJan Kara 
76007f2d89cSMatthew Wilcox 	xas_lock_irq(&xas);
76123c84eb7SMatthew Wilcox (Oracle) 	entry = get_unlocked_entry(&xas, 0);
7623159f943SMatthew Wilcox 	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
763c6dcf52cSJan Kara 		goto out;
764c6dcf52cSJan Kara 	if (!trunc &&
76507f2d89cSMatthew Wilcox 	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
76607f2d89cSMatthew Wilcox 	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
767c6dcf52cSJan Kara 		goto out;
768d2c997c0SDan Williams 	dax_disassociate_entry(entry, mapping, trunc);
76907f2d89cSMatthew Wilcox 	xas_store(&xas, NULL);
7707f0e07fbSMatthew Wilcox (Oracle) 	mapping->nrpages -= 1UL << dax_entry_order(entry);
771c6dcf52cSJan Kara 	ret = 1;
772c6dcf52cSJan Kara out:
77323738832SVivek Goyal 	put_unlocked_entry(&xas, entry, WAKE_ALL);
77407f2d89cSMatthew Wilcox 	xas_unlock_irq(&xas);
775c6dcf52cSJan Kara 	return ret;
776c6dcf52cSJan Kara }
77707f2d89cSMatthew Wilcox 
778ac401cc7SJan Kara /*
7793159f943SMatthew Wilcox  * Delete DAX entry at @index from @mapping.  Wait for it
7803159f943SMatthew Wilcox  * to be unlocked before deleting it.
781ac401cc7SJan Kara  */
782ac401cc7SJan Kara int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
783ac401cc7SJan Kara {
784a77d19f4SMatthew Wilcox 	int ret = __dax_invalidate_entry(mapping, index, true);
785ac401cc7SJan Kara 
786ac401cc7SJan Kara 	/*
787ac401cc7SJan Kara 	 * This gets called from truncate / punch_hole path. As such, the caller
788ac401cc7SJan Kara 	 * must hold locks protecting against concurrent modifications of the
789a77d19f4SMatthew Wilcox 	 * page cache (usually fs-private i_mmap_sem for writing). Since the
7903159f943SMatthew Wilcox 	 * caller has seen a DAX entry for this index, we better find it
791ac401cc7SJan Kara 	 * at that index as well...
792ac401cc7SJan Kara 	 */
793c6dcf52cSJan Kara 	WARN_ON_ONCE(!ret);
794c6dcf52cSJan Kara 	return ret;
795ac401cc7SJan Kara }
796ac401cc7SJan Kara 
797c6dcf52cSJan Kara /*
7983159f943SMatthew Wilcox  * Invalidate DAX entry if it is clean.
799c6dcf52cSJan Kara  */
800c6dcf52cSJan Kara int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
801c6dcf52cSJan Kara 				      pgoff_t index)
802c6dcf52cSJan Kara {
803a77d19f4SMatthew Wilcox 	return __dax_invalidate_entry(mapping, index, false);
804ac401cc7SJan Kara }
805ac401cc7SJan Kara 
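/*
 * Illustrative sketch (not part of fs/dax.c, helper name is hypothetical):
 * how a truncate/punch-hole path drops the DAX entries for a range, one
 * index at a time, with the filesystem's locks held to exclude new faults.
 * In-tree this happens from the generic truncate code in mm/truncate.c.
 */
static void __maybe_unused dax_truncate_entries_example(
		struct address_space *mapping, pgoff_t start, pgoff_t end)
{
	pgoff_t index;

	for (index = start; index <= end; index++)
		dax_delete_mapping_entry(mapping, index);
}
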
80660696eb2SChristoph Hellwig static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
807f7ca90b1SMatthew Wilcox {
808de205114SChristoph Hellwig 	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
809429f8de7SChristoph Hellwig }
810429f8de7SChristoph Hellwig 
811429f8de7SChristoph Hellwig static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
812429f8de7SChristoph Hellwig {
81360696eb2SChristoph Hellwig 	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
814cccbce67SDan Williams 	void *vto, *kaddr;
815cccbce67SDan Williams 	long rc;
816cccbce67SDan Williams 	int id;
817e2e05394SRoss Zwisler 
818cccbce67SDan Williams 	id = dax_read_lock();
819e511c4a3SJane Chu 	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
820e511c4a3SJane Chu 				&kaddr, NULL);
821cccbce67SDan Williams 	if (rc < 0) {
822cccbce67SDan Williams 		dax_read_unlock(id);
823cccbce67SDan Williams 		return rc;
824cccbce67SDan Williams 	}
825429f8de7SChristoph Hellwig 	vto = kmap_atomic(vmf->cow_page);
826429f8de7SChristoph Hellwig 	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
827f7ca90b1SMatthew Wilcox 	kunmap_atomic(vto);
828cccbce67SDan Williams 	dax_read_unlock(id);
829f7ca90b1SMatthew Wilcox 	return 0;
830f7ca90b1SMatthew Wilcox }
831f7ca90b1SMatthew Wilcox 
832642261acSRoss Zwisler /*
833642261acSRoss Zwisler  * By this point grab_mapping_entry() has ensured that we have a locked entry
834642261acSRoss Zwisler  * of the appropriate size so we don't have to worry about downgrading PMDs to
835642261acSRoss Zwisler  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
836642261acSRoss Zwisler  * already in the tree, we will skip the insertion and just dirty the PMD as
837642261acSRoss Zwisler  * appropriate.
838642261acSRoss Zwisler  */
839b15cd800SMatthew Wilcox static void *dax_insert_entry(struct xa_state *xas,
840b15cd800SMatthew Wilcox 		struct address_space *mapping, struct vm_fault *vmf,
841b15cd800SMatthew Wilcox 		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
8429973c98eSRoss Zwisler {
843b15cd800SMatthew Wilcox 	void *new_entry = dax_make_entry(pfn, flags);
8449973c98eSRoss Zwisler 
845f5b7b748SJan Kara 	if (dirty)
8469973c98eSRoss Zwisler 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
8479973c98eSRoss Zwisler 
8483159f943SMatthew Wilcox 	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
849b15cd800SMatthew Wilcox 		unsigned long index = xas->xa_index;
85091d25ba8SRoss Zwisler 		/* we are replacing a zero page with block mapping */
85191d25ba8SRoss Zwisler 		if (dax_is_pmd_entry(entry))
852977fbdcdSMatthew Wilcox 			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
853977fbdcdSMatthew Wilcox 					PG_PMD_NR, false);
85491d25ba8SRoss Zwisler 		else /* pte entry */
855b15cd800SMatthew Wilcox 			unmap_mapping_pages(mapping, index, 1, false);
856ac401cc7SJan Kara 	}
8579973c98eSRoss Zwisler 
858b15cd800SMatthew Wilcox 	xas_reset(xas);
859b15cd800SMatthew Wilcox 	xas_lock_irq(xas);
8601571c029SJan Kara 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
8611571c029SJan Kara 		void *old;
8621571c029SJan Kara 
863d2c997c0SDan Williams 		dax_disassociate_entry(entry, mapping, false);
8646061b69bSShiyang Ruan 		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
8656061b69bSShiyang Ruan 				false);
866642261acSRoss Zwisler 		/*
867a77d19f4SMatthew Wilcox 		 * Only swap our new entry into the page cache if the current
868642261acSRoss Zwisler 		 * entry is a zero page or an empty entry.  If a normal PTE or
869a77d19f4SMatthew Wilcox 		 * PMD entry is already in the cache, we leave it alone.  This
870642261acSRoss Zwisler 		 * means that if we are trying to insert a PTE and the
871642261acSRoss Zwisler 		 * existing entry is a PMD, we will just leave the PMD in the
872642261acSRoss Zwisler 		 * tree and dirty it if necessary.
873642261acSRoss Zwisler 		 */
8741571c029SJan Kara 		old = dax_lock_entry(xas, new_entry);
875b15cd800SMatthew Wilcox 		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
876b15cd800SMatthew Wilcox 					DAX_LOCKED));
87791d25ba8SRoss Zwisler 		entry = new_entry;
878b15cd800SMatthew Wilcox 	} else {
879b15cd800SMatthew Wilcox 		xas_load(xas);	/* Walk the xa_state */
880ac401cc7SJan Kara 	}
88191d25ba8SRoss Zwisler 
882f5b7b748SJan Kara 	if (dirty)
883b15cd800SMatthew Wilcox 		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
88491d25ba8SRoss Zwisler 
885b15cd800SMatthew Wilcox 	xas_unlock_irq(xas);
88691d25ba8SRoss Zwisler 	return entry;
8879973c98eSRoss Zwisler }
8889973c98eSRoss Zwisler 
8899fc747f6SMatthew Wilcox static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
8909fc747f6SMatthew Wilcox 		struct address_space *mapping, void *entry)
8919973c98eSRoss Zwisler {
89206083a09SMuchun Song 	unsigned long pfn, index, count, end;
8933fe0791cSDan Williams 	long ret = 0;
89406083a09SMuchun Song 	struct vm_area_struct *vma;
8959973c98eSRoss Zwisler 
8969973c98eSRoss Zwisler 	/*
897a6abc2c0SJan Kara 	 * A page got tagged dirty in DAX mapping? Something is seriously
898a6abc2c0SJan Kara 	 * wrong.
8999973c98eSRoss Zwisler 	 */
9003159f943SMatthew Wilcox 	if (WARN_ON(!xa_is_value(entry)))
901a6abc2c0SJan Kara 		return -EIO;
9029973c98eSRoss Zwisler 
9039fc747f6SMatthew Wilcox 	if (unlikely(dax_is_locked(entry))) {
9049fc747f6SMatthew Wilcox 		void *old_entry = entry;
9059fc747f6SMatthew Wilcox 
90623c84eb7SMatthew Wilcox (Oracle) 		entry = get_unlocked_entry(xas, 0);
9079fc747f6SMatthew Wilcox 
908a6abc2c0SJan Kara 		/* Entry got punched out / reallocated? */
9099fc747f6SMatthew Wilcox 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
910a6abc2c0SJan Kara 			goto put_unlocked;
911a6abc2c0SJan Kara 		/*
9129fc747f6SMatthew Wilcox 		 * Entry got reallocated elsewhere? No need to writeback.
9139fc747f6SMatthew Wilcox 		 * We have to compare pfns as we must not bail out due to
9149fc747f6SMatthew Wilcox 		 * difference in lockbit or entry type.
915a6abc2c0SJan Kara 		 */
9169fc747f6SMatthew Wilcox 		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
917a6abc2c0SJan Kara 			goto put_unlocked;
918642261acSRoss Zwisler 		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
919642261acSRoss Zwisler 					dax_is_zero_entry(entry))) {
9209973c98eSRoss Zwisler 			ret = -EIO;
921a6abc2c0SJan Kara 			goto put_unlocked;
9229973c98eSRoss Zwisler 		}
9239973c98eSRoss Zwisler 
9249fc747f6SMatthew Wilcox 		/* Another fsync thread may have already done this entry */
9259fc747f6SMatthew Wilcox 		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
926a6abc2c0SJan Kara 			goto put_unlocked;
9279fc747f6SMatthew Wilcox 	}
9289fc747f6SMatthew Wilcox 
929a6abc2c0SJan Kara 	/* Lock the entry to serialize with page faults */
9309fc747f6SMatthew Wilcox 	dax_lock_entry(xas, entry);
9319fc747f6SMatthew Wilcox 
932a6abc2c0SJan Kara 	/*
933a6abc2c0SJan Kara 	 * We can clear the tag now but we have to be careful so that concurrent
934a6abc2c0SJan Kara 	 * dax_writeback_one() calls for the same index cannot finish before we
935a6abc2c0SJan Kara 	 * actually flush the caches. This is achieved as the calls will look
936b93b0163SMatthew Wilcox 	 * at the entry only under the i_pages lock and once they do that
937b93b0163SMatthew Wilcox 	 * they will see the entry locked and wait for it to unlock.
938a6abc2c0SJan Kara 	 */
9399fc747f6SMatthew Wilcox 	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
9409fc747f6SMatthew Wilcox 	xas_unlock_irq(xas);
941a6abc2c0SJan Kara 
942642261acSRoss Zwisler 	/*
943e4b3448bSMatthew Wilcox 	 * If dax_writeback_mapping_range() was given a wbc->range_start
944e4b3448bSMatthew Wilcox 	 * in the middle of a PMD, the 'index' we use needs to be
945e4b3448bSMatthew Wilcox 	 * aligned to the start of the PMD.
9463fe0791cSDan Williams 	 * This allows us to flush for PMD_SIZE and not have to worry about
9473fe0791cSDan Williams 	 * partial PMD writebacks.
948642261acSRoss Zwisler 	 */
949a77d19f4SMatthew Wilcox 	pfn = dax_to_pfn(entry);
950e4b3448bSMatthew Wilcox 	count = 1UL << dax_entry_order(entry);
951e4b3448bSMatthew Wilcox 	index = xas->xa_index & ~(count - 1);
95206083a09SMuchun Song 	end = index + count - 1;
953cccbce67SDan Williams 
95406083a09SMuchun Song 	/* Walk all mappings of a given index of a file and writeprotect them */
95506083a09SMuchun Song 	i_mmap_lock_read(mapping);
95606083a09SMuchun Song 	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
95706083a09SMuchun Song 		pfn_mkclean_range(pfn, count, index, vma);
95806083a09SMuchun Song 		cond_resched();
95906083a09SMuchun Song 	}
96006083a09SMuchun Song 	i_mmap_unlock_read(mapping);
96106083a09SMuchun Song 
962e4b3448bSMatthew Wilcox 	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
9634b4bb46dSJan Kara 	/*
9644b4bb46dSJan Kara 	 * After we have flushed the cache, we can clear the dirty tag. There
9654b4bb46dSJan Kara 	 * cannot be new dirty data in the pfn after the flush has completed as
9664b4bb46dSJan Kara 	 * the pfn mappings are writeprotected and fault waits for mapping
9674b4bb46dSJan Kara 	 * entry lock.
9684b4bb46dSJan Kara 	 */
9699fc747f6SMatthew Wilcox 	xas_reset(xas);
9709fc747f6SMatthew Wilcox 	xas_lock_irq(xas);
9719fc747f6SMatthew Wilcox 	xas_store(xas, entry);
9729fc747f6SMatthew Wilcox 	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
973698ab77aSVivek Goyal 	dax_wake_entry(xas, entry, WAKE_NEXT);
9749fc747f6SMatthew Wilcox 
975e4b3448bSMatthew Wilcox 	trace_dax_writeback_one(mapping->host, index, count);
9769973c98eSRoss Zwisler 	return ret;
9779973c98eSRoss Zwisler 
978a6abc2c0SJan Kara  put_unlocked:
9794c3d043dSVivek Goyal 	put_unlocked_entry(xas, entry, WAKE_NEXT);
9809973c98eSRoss Zwisler 	return ret;
9819973c98eSRoss Zwisler }
9829973c98eSRoss Zwisler 
9839973c98eSRoss Zwisler /*
9849973c98eSRoss Zwisler  * Flush the mapping to the persistent domain within the byte range of [start,
9859973c98eSRoss Zwisler  * end]. This is required by data integrity operations to ensure file data is
9869973c98eSRoss Zwisler  * on persistent storage prior to completion of the operation.
9879973c98eSRoss Zwisler  */
9887f6d5b52SRoss Zwisler int dax_writeback_mapping_range(struct address_space *mapping,
9893f666c56SVivek Goyal 		struct dax_device *dax_dev, struct writeback_control *wbc)
9909973c98eSRoss Zwisler {
9919fc747f6SMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
9929973c98eSRoss Zwisler 	struct inode *inode = mapping->host;
9939fc747f6SMatthew Wilcox 	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
9949fc747f6SMatthew Wilcox 	void *entry;
9959fc747f6SMatthew Wilcox 	int ret = 0;
9969fc747f6SMatthew Wilcox 	unsigned int scanned = 0;
9979973c98eSRoss Zwisler 
9989973c98eSRoss Zwisler 	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
9999973c98eSRoss Zwisler 		return -EIO;
10009973c98eSRoss Zwisler 
10017716506aSMatthew Wilcox (Oracle) 	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
10027f6d5b52SRoss Zwisler 		return 0;
10037f6d5b52SRoss Zwisler 
10049fc747f6SMatthew Wilcox 	trace_dax_writeback_range(inode, xas.xa_index, end_index);
10059973c98eSRoss Zwisler 
10069fc747f6SMatthew Wilcox 	tag_pages_for_writeback(mapping, xas.xa_index, end_index);
1007d14a3f48SRoss Zwisler 
10089fc747f6SMatthew Wilcox 	xas_lock_irq(&xas);
10099fc747f6SMatthew Wilcox 	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
10109fc747f6SMatthew Wilcox 		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
1011819ec6b9SJeff Layton 		if (ret < 0) {
1012819ec6b9SJeff Layton 			mapping_set_error(mapping, ret);
10139fc747f6SMatthew Wilcox 			break;
1014d14a3f48SRoss Zwisler 		}
10159fc747f6SMatthew Wilcox 		if (++scanned % XA_CHECK_SCHED)
10169fc747f6SMatthew Wilcox 			continue;
10179fc747f6SMatthew Wilcox 
10189fc747f6SMatthew Wilcox 		xas_pause(&xas);
10199fc747f6SMatthew Wilcox 		xas_unlock_irq(&xas);
10209fc747f6SMatthew Wilcox 		cond_resched();
10219fc747f6SMatthew Wilcox 		xas_lock_irq(&xas);
1022d14a3f48SRoss Zwisler 	}
10239fc747f6SMatthew Wilcox 	xas_unlock_irq(&xas);
10249fc747f6SMatthew Wilcox 	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
10259fc747f6SMatthew Wilcox 	return ret;
10269973c98eSRoss Zwisler }
10279973c98eSRoss Zwisler EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
10289973c98eSRoss Zwisler 
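/*
 * Illustrative sketch (not part of fs/dax.c, helper name and the way the
 * dax_device is obtained are hypothetical): a filesystem's ->writepages
 * implementation for DAX simply forwards the writeback_control range here,
 * much like the ext4 and xfs DAX writepages paths do.
 */
static int __maybe_unused dax_writepages_example(struct address_space *mapping,
		struct writeback_control *wbc, struct dax_device *dax_dev)
{
	return dax_writeback_mapping_range(mapping, dax_dev, wbc);
}
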
1029e28cd3e5SShiyang Ruan static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
1030e28cd3e5SShiyang Ruan 		size_t size, void **kaddr, pfn_t *pfnp)
10315e161e40SJan Kara {
103260696eb2SChristoph Hellwig 	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1033e28cd3e5SShiyang Ruan 	int id, rc = 0;
10345e161e40SJan Kara 	long length;
10355e161e40SJan Kara 
1036cccbce67SDan Williams 	id = dax_read_lock();
10375e161e40SJan Kara 	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1038e28cd3e5SShiyang Ruan 				   DAX_ACCESS, kaddr, pfnp);
10395e161e40SJan Kara 	if (length < 0) {
10405e161e40SJan Kara 		rc = length;
10415e161e40SJan Kara 		goto out;
10425e161e40SJan Kara 	}
1043e28cd3e5SShiyang Ruan 	if (!pfnp)
1044e28cd3e5SShiyang Ruan 		goto out_check_addr;
10455e161e40SJan Kara 	rc = -EINVAL;
10465e161e40SJan Kara 	if (PFN_PHYS(length) < size)
10475e161e40SJan Kara 		goto out;
10485e161e40SJan Kara 	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
10495e161e40SJan Kara 		goto out;
10505e161e40SJan Kara 	/* For larger pages we need devmap */
10515e161e40SJan Kara 	if (length > 1 && !pfn_t_devmap(*pfnp))
10525e161e40SJan Kara 		goto out;
10535e161e40SJan Kara 	rc = 0;
1054e28cd3e5SShiyang Ruan 
1055e28cd3e5SShiyang Ruan out_check_addr:
1056e28cd3e5SShiyang Ruan 	if (!kaddr)
1057e28cd3e5SShiyang Ruan 		goto out;
1058e28cd3e5SShiyang Ruan 	if (!*kaddr)
1059e28cd3e5SShiyang Ruan 		rc = -EFAULT;
10605e161e40SJan Kara out:
1061cccbce67SDan Williams 	dax_read_unlock(id);
1062cccbce67SDan Williams 	return rc;
1063cccbce67SDan Williams }
1064f7ca90b1SMatthew Wilcox 
1065*ff17b8dfSShiyang Ruan /**
1066*ff17b8dfSShiyang Ruan  * dax_iomap_cow_copy - Copy the data from source to destination before write
1067*ff17b8dfSShiyang Ruan  * @pos:	address to copy from.
1068*ff17b8dfSShiyang Ruan  * @length:	size of the copy operation.
1069*ff17b8dfSShiyang Ruan  * @align_size:	alignment of the range (either PMD_SIZE or PAGE_SIZE)
1070*ff17b8dfSShiyang Ruan  * @srcmap:	iomap srcmap
1071*ff17b8dfSShiyang Ruan  * @daddr:	destination address to copy to.
1072*ff17b8dfSShiyang Ruan  *
1073*ff17b8dfSShiyang Ruan  * This can be called from two places. Either during a DAX write fault (in
1074*ff17b8dfSShiyang Ruan  * which case the range is page aligned), to copy @length bytes of data to
1075*ff17b8dfSShiyang Ruan  * @daddr. Or, during a normal DAX write, dax_iomap_iter() may call this to
1076*ff17b8dfSShiyang Ruan  * copy the unaligned head and/or tail of the range; the copy of the aligned
1077*ff17b8dfSShiyang Ruan  * middle of the range is then taken care of by dax_iomap_iter() itself.
1078*ff17b8dfSShiyang Ruan  */
1079*ff17b8dfSShiyang Ruan static int dax_iomap_cow_copy(loff_t pos, uint64_t length, size_t align_size,
1080*ff17b8dfSShiyang Ruan 		const struct iomap *srcmap, void *daddr)
1081*ff17b8dfSShiyang Ruan {
1082*ff17b8dfSShiyang Ruan 	loff_t head_off = pos & (align_size - 1);
1083*ff17b8dfSShiyang Ruan 	size_t size = ALIGN(head_off + length, align_size);
1084*ff17b8dfSShiyang Ruan 	loff_t end = pos + length;
1085*ff17b8dfSShiyang Ruan 	loff_t pg_end = round_up(end, align_size);
1086*ff17b8dfSShiyang Ruan 	bool copy_all = head_off == 0 && end == pg_end;
1087*ff17b8dfSShiyang Ruan 	void *saddr = NULL;
1088*ff17b8dfSShiyang Ruan 	int ret = 0;
1089*ff17b8dfSShiyang Ruan 
1090*ff17b8dfSShiyang Ruan 	ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
1091*ff17b8dfSShiyang Ruan 	if (ret)
1092*ff17b8dfSShiyang Ruan 		return ret;
1093*ff17b8dfSShiyang Ruan 
1094*ff17b8dfSShiyang Ruan 	if (copy_all) {
1095*ff17b8dfSShiyang Ruan 		ret = copy_mc_to_kernel(daddr, saddr, length);
1096*ff17b8dfSShiyang Ruan 		return ret ? -EIO : 0;
1097*ff17b8dfSShiyang Ruan 	}
1098*ff17b8dfSShiyang Ruan 
1099*ff17b8dfSShiyang Ruan 	/* Copy the head part of the range */
1100*ff17b8dfSShiyang Ruan 	if (head_off) {
1101*ff17b8dfSShiyang Ruan 		ret = copy_mc_to_kernel(daddr, saddr, head_off);
1102*ff17b8dfSShiyang Ruan 		if (ret)
1103*ff17b8dfSShiyang Ruan 			return -EIO;
1104*ff17b8dfSShiyang Ruan 	}
1105*ff17b8dfSShiyang Ruan 
1106*ff17b8dfSShiyang Ruan 	/* Copy the tail part of the range */
1107*ff17b8dfSShiyang Ruan 	if (end < pg_end) {
1108*ff17b8dfSShiyang Ruan 		loff_t tail_off = head_off + length;
1109*ff17b8dfSShiyang Ruan 		loff_t tail_len = pg_end - end;
1110*ff17b8dfSShiyang Ruan 
1111*ff17b8dfSShiyang Ruan 		ret = copy_mc_to_kernel(daddr + tail_off, saddr + tail_off,
1112*ff17b8dfSShiyang Ruan 					tail_len);
1113*ff17b8dfSShiyang Ruan 		if (ret)
1114*ff17b8dfSShiyang Ruan 			return -EIO;
1115*ff17b8dfSShiyang Ruan 	}
1116*ff17b8dfSShiyang Ruan 	return 0;
1117*ff17b8dfSShiyang Ruan }
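
/*
 * Worked example for dax_iomap_cow_copy() (illustrative numbers only):
 * pos = 0x1200, length = 0x2400, align_size = PAGE_SIZE (0x1000).  Then
 * head_off = 0x200, end = 0x3600 and pg_end = 0x4000, so copy_all is false.
 * The head copy duplicates bytes [0x0, 0x200) of the mapped source range into
 * @daddr, the tail copy duplicates bytes [0x2600, 0x3000), and the caller
 * itself writes the new data into bytes [0x200, 0x2600).
 */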
1118*ff17b8dfSShiyang Ruan 
11192f89dc12SJan Kara /*
112091d25ba8SRoss Zwisler  * The user has performed a load from a hole in the file.  Allocating a new
112191d25ba8SRoss Zwisler  * page in the file would cause excessive storage usage for workloads with
112291d25ba8SRoss Zwisler  * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
112391d25ba8SRoss Zwisler  * If this page is ever written to we will re-fault and change the mapping to
112491d25ba8SRoss Zwisler  * point to real DAX storage instead.
11252f89dc12SJan Kara  */
1126b15cd800SMatthew Wilcox static vm_fault_t dax_load_hole(struct xa_state *xas,
1127b15cd800SMatthew Wilcox 		struct address_space *mapping, void **entry,
1128e30331ffSRoss Zwisler 		struct vm_fault *vmf)
1129e30331ffSRoss Zwisler {
1130e30331ffSRoss Zwisler 	struct inode *inode = mapping->host;
113191d25ba8SRoss Zwisler 	unsigned long vaddr = vmf->address;
1132b90ca5ccSMatthew Wilcox 	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1133b90ca5ccSMatthew Wilcox 	vm_fault_t ret;
1134e30331ffSRoss Zwisler 
1135b15cd800SMatthew Wilcox 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
11363159f943SMatthew Wilcox 			DAX_ZERO_PAGE, false);
11373159f943SMatthew Wilcox 
1138ab77dab4SSouptick Joarder 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1139e30331ffSRoss Zwisler 	trace_dax_load_hole(inode, vmf, ret);
1140e30331ffSRoss Zwisler 	return ret;
1141e30331ffSRoss Zwisler }
1142e30331ffSRoss Zwisler 
1143c2436190SShiyang Ruan #ifdef CONFIG_FS_DAX_PMD
1144c2436190SShiyang Ruan static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
114565dd814aSChristoph Hellwig 		const struct iomap *iomap, void **entry)
1146c2436190SShiyang Ruan {
1147c2436190SShiyang Ruan 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1148c2436190SShiyang Ruan 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1149c2436190SShiyang Ruan 	struct vm_area_struct *vma = vmf->vma;
1150c2436190SShiyang Ruan 	struct inode *inode = mapping->host;
1151c2436190SShiyang Ruan 	pgtable_t pgtable = NULL;
1152c2436190SShiyang Ruan 	struct page *zero_page;
1153c2436190SShiyang Ruan 	spinlock_t *ptl;
1154c2436190SShiyang Ruan 	pmd_t pmd_entry;
1155c2436190SShiyang Ruan 	pfn_t pfn;
1156c2436190SShiyang Ruan 
1157c2436190SShiyang Ruan 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1158c2436190SShiyang Ruan 
1159c2436190SShiyang Ruan 	if (unlikely(!zero_page))
1160c2436190SShiyang Ruan 		goto fallback;
1161c2436190SShiyang Ruan 
1162c2436190SShiyang Ruan 	pfn = page_to_pfn_t(zero_page);
1163c2436190SShiyang Ruan 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1164c2436190SShiyang Ruan 			DAX_PMD | DAX_ZERO_PAGE, false);
1165c2436190SShiyang Ruan 
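	/*
	 * Some architectures (e.g. powerpc book3s64) require a preallocated
	 * page table to be deposited for every huge PMD mapping; it is
	 * withdrawn again when the PMD is split or zapped.
	 */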
1166c2436190SShiyang Ruan 	if (arch_needs_pgtable_deposit()) {
1167c2436190SShiyang Ruan 		pgtable = pte_alloc_one(vma->vm_mm);
1168c2436190SShiyang Ruan 		if (!pgtable)
1169c2436190SShiyang Ruan 			return VM_FAULT_OOM;
1170c2436190SShiyang Ruan 	}
1171c2436190SShiyang Ruan 
1172c2436190SShiyang Ruan 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1173c2436190SShiyang Ruan 	if (!pmd_none(*(vmf->pmd))) {
1174c2436190SShiyang Ruan 		spin_unlock(ptl);
1175c2436190SShiyang Ruan 		goto fallback;
1176c2436190SShiyang Ruan 	}
1177c2436190SShiyang Ruan 
1178c2436190SShiyang Ruan 	if (pgtable) {
1179c2436190SShiyang Ruan 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1180c2436190SShiyang Ruan 		mm_inc_nr_ptes(vma->vm_mm);
1181c2436190SShiyang Ruan 	}
1182c2436190SShiyang Ruan 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1183c2436190SShiyang Ruan 	pmd_entry = pmd_mkhuge(pmd_entry);
1184c2436190SShiyang Ruan 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1185c2436190SShiyang Ruan 	spin_unlock(ptl);
1186c2436190SShiyang Ruan 	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1187c2436190SShiyang Ruan 	return VM_FAULT_NOPAGE;
1188c2436190SShiyang Ruan 
1189c2436190SShiyang Ruan fallback:
1190c2436190SShiyang Ruan 	if (pgtable)
1191c2436190SShiyang Ruan 		pte_free(vma->vm_mm, pgtable);
1192c2436190SShiyang Ruan 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1193c2436190SShiyang Ruan 	return VM_FAULT_FALLBACK;
1194c2436190SShiyang Ruan }
1195c2436190SShiyang Ruan #else
1196c2436190SShiyang Ruan static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
119765dd814aSChristoph Hellwig 		const struct iomap *iomap, void **entry)
1198c2436190SShiyang Ruan {
1199c2436190SShiyang Ruan 	return VM_FAULT_FALLBACK;
1200c2436190SShiyang Ruan }
1201c2436190SShiyang Ruan #endif /* CONFIG_FS_DAX_PMD */
1202c2436190SShiyang Ruan 
1203e5c71954SChristoph Hellwig static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
1204e5c71954SChristoph Hellwig 		unsigned int offset, size_t size)
1205e5c71954SChristoph Hellwig {
1206e5c71954SChristoph Hellwig 	void *kaddr;
1207e5c71954SChristoph Hellwig 	long ret;
1208e5c71954SChristoph Hellwig 
1209e511c4a3SJane Chu 	ret = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL);
1210e5c71954SChristoph Hellwig 	if (ret > 0) {
1211e5c71954SChristoph Hellwig 		memset(kaddr + offset, 0, size);
1212e5c71954SChristoph Hellwig 		dax_flush(dax_dev, kaddr + offset, size);
1213e5c71954SChristoph Hellwig 	}
1214e5c71954SChristoph Hellwig 	return ret;
1215e5c71954SChristoph Hellwig }
1216e5c71954SChristoph Hellwig 
1217c6f40468SChristoph Hellwig static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
1218679c8bd3SChristoph Hellwig {
1219c6f40468SChristoph Hellwig 	const struct iomap *iomap = &iter->iomap;
1220c6f40468SChristoph Hellwig 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1221c6f40468SChristoph Hellwig 	loff_t pos = iter->pos;
1222c6f40468SChristoph Hellwig 	u64 length = iomap_length(iter);
1223c6f40468SChristoph Hellwig 	s64 written = 0;
1224c6f40468SChristoph Hellwig 
1225c6f40468SChristoph Hellwig 	/* already zeroed?  we're done. */
1226c6f40468SChristoph Hellwig 	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1227c6f40468SChristoph Hellwig 		return length;
1228c6f40468SChristoph Hellwig 
1229c6f40468SChristoph Hellwig 	do {
123081ee8e52SMatthew Wilcox (Oracle) 		unsigned offset = offset_in_page(pos);
123181ee8e52SMatthew Wilcox (Oracle) 		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
1232c6f40468SChristoph Hellwig 		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1233c6f40468SChristoph Hellwig 		long rc;
1234c6f40468SChristoph Hellwig 		int id;
12350a23f9ffSVivek Goyal 
1236cccbce67SDan Williams 		id = dax_read_lock();
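		/*
		 * Whole, aligned pages are zeroed through the dax_dev's
		 * zero_page_range operation; partial pages fall back to
		 * mapping the page and doing memset plus cache flush by hand.
		 */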
1237e5c71954SChristoph Hellwig 		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
123881ee8e52SMatthew Wilcox (Oracle) 			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
12390a23f9ffSVivek Goyal 		else
1240e5c71954SChristoph Hellwig 			rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
1241cccbce67SDan Williams 		dax_read_unlock(id);
12420a23f9ffSVivek Goyal 
1243e5c71954SChristoph Hellwig 		if (rc < 0)
1244e5c71954SChristoph Hellwig 			return rc;
1245c6f40468SChristoph Hellwig 		pos += size;
1246c6f40468SChristoph Hellwig 		length -= size;
1247c6f40468SChristoph Hellwig 		written += size;
1248c6f40468SChristoph Hellwig 		if (did_zero)
1249c6f40468SChristoph Hellwig 			*did_zero = true;
1250c6f40468SChristoph Hellwig 	} while (length > 0);
1251c6f40468SChristoph Hellwig 
1252c6f40468SChristoph Hellwig 	return written;
1253679c8bd3SChristoph Hellwig }
1254679c8bd3SChristoph Hellwig 
1255c6f40468SChristoph Hellwig int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1256c6f40468SChristoph Hellwig 		const struct iomap_ops *ops)
1257c6f40468SChristoph Hellwig {
1258c6f40468SChristoph Hellwig 	struct iomap_iter iter = {
1259c6f40468SChristoph Hellwig 		.inode		= inode,
1260c6f40468SChristoph Hellwig 		.pos		= pos,
1261c6f40468SChristoph Hellwig 		.len		= len,
1262952da063SChristoph Hellwig 		.flags		= IOMAP_DAX | IOMAP_ZERO,
1263c6f40468SChristoph Hellwig 	};
1264c6f40468SChristoph Hellwig 	int ret;
1265c6f40468SChristoph Hellwig 
1266c6f40468SChristoph Hellwig 	while ((ret = iomap_iter(&iter, ops)) > 0)
1267c6f40468SChristoph Hellwig 		iter.processed = dax_zero_iter(&iter, did_zero);
1268c6f40468SChristoph Hellwig 	return ret;
1269c6f40468SChristoph Hellwig }
1270c6f40468SChristoph Hellwig EXPORT_SYMBOL_GPL(dax_zero_range);
1271c6f40468SChristoph Hellwig 
1272c6f40468SChristoph Hellwig int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1273c6f40468SChristoph Hellwig 		const struct iomap_ops *ops)
1274c6f40468SChristoph Hellwig {
1275c6f40468SChristoph Hellwig 	unsigned int blocksize = i_blocksize(inode);
1276c6f40468SChristoph Hellwig 	unsigned int off = pos & (blocksize - 1);
1277c6f40468SChristoph Hellwig 
1278c6f40468SChristoph Hellwig 	/* Block boundary? Nothing to do */
1279c6f40468SChristoph Hellwig 	if (!off)
1280c6f40468SChristoph Hellwig 		return 0;
1281c6f40468SChristoph Hellwig 	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
1282c6f40468SChristoph Hellwig }
1283c6f40468SChristoph Hellwig EXPORT_SYMBOL_GPL(dax_truncate_page);
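
/*
 * Illustrative only: a filesystem truncate path would typically use
 * dax_truncate_page() to zero the tail of the (partial) last block beyond the
 * new EOF, e.g. (with &foo_iomap_ops standing in for the fs's iomap ops):
 *
 *	error = dax_truncate_page(inode, newsize, &did_zero, &foo_iomap_ops);
 */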
1284c6f40468SChristoph Hellwig 
1285ca289e0bSChristoph Hellwig static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
1286ca289e0bSChristoph Hellwig 		struct iov_iter *iter)
1287a254e568SChristoph Hellwig {
1288ca289e0bSChristoph Hellwig 	const struct iomap *iomap = &iomi->iomap;
1289*ff17b8dfSShiyang Ruan 	const struct iomap *srcmap = &iomi->srcmap;
1290ca289e0bSChristoph Hellwig 	loff_t length = iomap_length(iomi);
1291ca289e0bSChristoph Hellwig 	loff_t pos = iomi->pos;
1292cccbce67SDan Williams 	struct dax_device *dax_dev = iomap->dax_dev;
1293a254e568SChristoph Hellwig 	loff_t end = pos + length, done = 0;
1294*ff17b8dfSShiyang Ruan 	bool write = iov_iter_rw(iter) == WRITE;
1295a254e568SChristoph Hellwig 	ssize_t ret = 0;
1296a77d4786SDan Williams 	size_t xfer;
1297cccbce67SDan Williams 	int id;
1298a254e568SChristoph Hellwig 
1299*ff17b8dfSShiyang Ruan 	if (!write) {
1300ca289e0bSChristoph Hellwig 		end = min(end, i_size_read(iomi->inode));
1301a254e568SChristoph Hellwig 		if (pos >= end)
1302a254e568SChristoph Hellwig 			return 0;
1303a254e568SChristoph Hellwig 
1304a254e568SChristoph Hellwig 		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1305a254e568SChristoph Hellwig 			return iov_iter_zero(min(length, end - pos), iter);
1306a254e568SChristoph Hellwig 	}
1307a254e568SChristoph Hellwig 
1308*ff17b8dfSShiyang Ruan 	/*
1309*ff17b8dfSShiyang Ruan 	 * In DAX mode, enforce either pure overwrites of written extents, or
1310*ff17b8dfSShiyang Ruan 	 * writes to unwritten extents as part of a copy-on-write operation.
1311*ff17b8dfSShiyang Ruan 	 */
1312*ff17b8dfSShiyang Ruan 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
1313*ff17b8dfSShiyang Ruan 			!(iomap->flags & IOMAP_F_SHARED)))
1314a254e568SChristoph Hellwig 		return -EIO;
1315a254e568SChristoph Hellwig 
1316e3fce68cSJan Kara 	/*
1317e3fce68cSJan Kara 	 * Write can allocate block for an area which has a hole page mapped
1318e3fce68cSJan Kara 	 * into page tables. We have to tear down these mappings so that data
1319e3fce68cSJan Kara 	 * written by write(2) is visible in mmap.
1320e3fce68cSJan Kara 	 */
1321cd656375SJan Kara 	if (iomap->flags & IOMAP_F_NEW) {
1322ca289e0bSChristoph Hellwig 		invalidate_inode_pages2_range(iomi->inode->i_mapping,
1323e3fce68cSJan Kara 					      pos >> PAGE_SHIFT,
1324e3fce68cSJan Kara 					      (end - 1) >> PAGE_SHIFT);
1325e3fce68cSJan Kara 	}
1326e3fce68cSJan Kara 
1327cccbce67SDan Williams 	id = dax_read_lock();
1328a254e568SChristoph Hellwig 	while (pos < end) {
1329a254e568SChristoph Hellwig 		unsigned offset = pos & (PAGE_SIZE - 1);
1330cccbce67SDan Williams 		const size_t size = ALIGN(length + offset, PAGE_SIZE);
133160696eb2SChristoph Hellwig 		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1332a254e568SChristoph Hellwig 		ssize_t map_len;
1333047218ecSJane Chu 		bool recovery = false;
1334cccbce67SDan Williams 		void *kaddr;
1335a254e568SChristoph Hellwig 
1336d1908f52SMichal Hocko 		if (fatal_signal_pending(current)) {
1337d1908f52SMichal Hocko 			ret = -EINTR;
1338d1908f52SMichal Hocko 			break;
1339d1908f52SMichal Hocko 		}
1340d1908f52SMichal Hocko 
1341cccbce67SDan Williams 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1342e511c4a3SJane Chu 				DAX_ACCESS, &kaddr, NULL);
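		/*
		 * -EIO here may mean the range contains poison.  For writes,
		 * retry asking for a recovery-capable mapping and, if that
		 * succeeds, route the data through dax_recovery_write() below
		 * so the write can still make progress.
		 */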
1343047218ecSJane Chu 		if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
1344047218ecSJane Chu 			map_len = dax_direct_access(dax_dev, pgoff,
1345047218ecSJane Chu 					PHYS_PFN(size), DAX_RECOVERY_WRITE,
134686ed913bSHuaisheng Ye 					&kaddr, NULL);
1347047218ecSJane Chu 			if (map_len > 0)
1348047218ecSJane Chu 				recovery = true;
1349047218ecSJane Chu 		}
1350a254e568SChristoph Hellwig 		if (map_len < 0) {
1351a254e568SChristoph Hellwig 			ret = map_len;
1352a254e568SChristoph Hellwig 			break;
1353a254e568SChristoph Hellwig 		}
1354a254e568SChristoph Hellwig 
1355*ff17b8dfSShiyang Ruan 		if (write &&
1356*ff17b8dfSShiyang Ruan 		    srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
1357*ff17b8dfSShiyang Ruan 			ret = dax_iomap_cow_copy(pos, length, PAGE_SIZE, srcmap,
1358*ff17b8dfSShiyang Ruan 						 kaddr);
1359*ff17b8dfSShiyang Ruan 			if (ret)
1360*ff17b8dfSShiyang Ruan 				break;
1361*ff17b8dfSShiyang Ruan 		}
1362*ff17b8dfSShiyang Ruan 
1363cccbce67SDan Williams 		map_len = PFN_PHYS(map_len);
1364cccbce67SDan Williams 		kaddr += offset;
1365a254e568SChristoph Hellwig 		map_len -= offset;
1366a254e568SChristoph Hellwig 		if (map_len > end - pos)
1367a254e568SChristoph Hellwig 			map_len = end - pos;
1368a254e568SChristoph Hellwig 
1369047218ecSJane Chu 		if (recovery)
1370047218ecSJane Chu 			xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
1371047218ecSJane Chu 					map_len, iter);
1372*ff17b8dfSShiyang Ruan 		else if (write)
1373a77d4786SDan Williams 			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1374fec53774SDan Williams 					map_len, iter);
1375a254e568SChristoph Hellwig 		else
1376a77d4786SDan Williams 			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1377b3a9a0c3SDan Williams 					map_len, iter);
1378a254e568SChristoph Hellwig 
1379a77d4786SDan Williams 		pos += xfer;
1380a77d4786SDan Williams 		length -= xfer;
1381a77d4786SDan Williams 		done += xfer;
1382a77d4786SDan Williams 
1383a77d4786SDan Williams 		if (xfer == 0)
1384a77d4786SDan Williams 			ret = -EFAULT;
1385a77d4786SDan Williams 		if (xfer < map_len)
1386a77d4786SDan Williams 			break;
1387a254e568SChristoph Hellwig 	}
1388cccbce67SDan Williams 	dax_read_unlock(id);
1389a254e568SChristoph Hellwig 
1390a254e568SChristoph Hellwig 	return done ? done : ret;
1391a254e568SChristoph Hellwig }
1392a254e568SChristoph Hellwig 
1393a254e568SChristoph Hellwig /**
139411c59c92SRoss Zwisler  * dax_iomap_rw - Perform I/O to a DAX file
1395a254e568SChristoph Hellwig  * @iocb:	The control block for this I/O
1396a254e568SChristoph Hellwig  * @iter:	The addresses to do I/O from or to
1397a254e568SChristoph Hellwig  * @ops:	iomap ops passed from the file system
1398a254e568SChristoph Hellwig  *
1399a254e568SChristoph Hellwig  * This function performs read and write operations to directly mapped
1400a254e568SChristoph Hellwig  * persistent memory.  The caller needs to take care of read/write exclusion
1401a254e568SChristoph Hellwig  * and evicting any page cache pages in the region under I/O.
1402a254e568SChristoph Hellwig  */
1403a254e568SChristoph Hellwig ssize_t
140411c59c92SRoss Zwisler dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
14058ff6daa1SChristoph Hellwig 		const struct iomap_ops *ops)
1406a254e568SChristoph Hellwig {
1407ca289e0bSChristoph Hellwig 	struct iomap_iter iomi = {
1408ca289e0bSChristoph Hellwig 		.inode		= iocb->ki_filp->f_mapping->host,
1409ca289e0bSChristoph Hellwig 		.pos		= iocb->ki_pos,
1410ca289e0bSChristoph Hellwig 		.len		= iov_iter_count(iter),
1411952da063SChristoph Hellwig 		.flags		= IOMAP_DAX,
1412ca289e0bSChristoph Hellwig 	};
1413ca289e0bSChristoph Hellwig 	loff_t done = 0;
1414ca289e0bSChristoph Hellwig 	int ret;
1415a254e568SChristoph Hellwig 
1416168316dbSChristoph Hellwig 	if (iov_iter_rw(iter) == WRITE) {
1417ca289e0bSChristoph Hellwig 		lockdep_assert_held_write(&iomi.inode->i_rwsem);
1418ca289e0bSChristoph Hellwig 		iomi.flags |= IOMAP_WRITE;
1419168316dbSChristoph Hellwig 	} else {
1420ca289e0bSChristoph Hellwig 		lockdep_assert_held(&iomi.inode->i_rwsem);
1421168316dbSChristoph Hellwig 	}
1422a254e568SChristoph Hellwig 
142396222d53SJeff Moyer 	if (iocb->ki_flags & IOCB_NOWAIT)
1424ca289e0bSChristoph Hellwig 		iomi.flags |= IOMAP_NOWAIT;
142596222d53SJeff Moyer 
1426ca289e0bSChristoph Hellwig 	while ((ret = iomap_iter(&iomi, ops)) > 0)
1427ca289e0bSChristoph Hellwig 		iomi.processed = dax_iomap_iter(&iomi, iter);
1428a254e568SChristoph Hellwig 
1429ca289e0bSChristoph Hellwig 	done = iomi.pos - iocb->ki_pos;
1430ca289e0bSChristoph Hellwig 	iocb->ki_pos = iomi.pos;
1431a254e568SChristoph Hellwig 	return done ? done : ret;
1432a254e568SChristoph Hellwig }
143311c59c92SRoss Zwisler EXPORT_SYMBOL_GPL(dax_iomap_rw);
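
/*
 * Illustrative only (not part of this file): filesystems call dax_iomap_rw()
 * from their ->read_iter()/->write_iter() handlers after taking the inode
 * lock, roughly as sketched below.  foo_iomap_ops is a placeholder for the
 * filesystem's struct iomap_ops.
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */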
1434a7d73fe6SChristoph Hellwig 
1435ab77dab4SSouptick Joarder static vm_fault_t dax_fault_return(int error)
14369f141d6eSJan Kara {
14379f141d6eSJan Kara 	if (error == 0)
14389f141d6eSJan Kara 		return VM_FAULT_NOPAGE;
1439c9aed74eSSouptick Joarder 	return vmf_error(error);
14409f141d6eSJan Kara }
14419f141d6eSJan Kara 
1442aaa422c4SDan Williams /*
1443aaa422c4SDan Williams  * MAP_SYNC on a dax mapping guarantees dirty metadata is
1444aaa422c4SDan Williams  * flushed on write-faults (non-cow), but not read-faults.
1445aaa422c4SDan Williams  */
1446aaa422c4SDan Williams static bool dax_fault_is_synchronous(unsigned long flags,
144765dd814aSChristoph Hellwig 		struct vm_area_struct *vma, const struct iomap *iomap)
1448aaa422c4SDan Williams {
1449aaa422c4SDan Williams 	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1450aaa422c4SDan Williams 		&& (iomap->flags & IOMAP_F_DIRTY);
1451aaa422c4SDan Williams }
1452aaa422c4SDan Williams 
145355f81639SShiyang Ruan /*
145455f81639SShiyang Ruan  * When handling a synchronous page fault and the inode needs an fsync, we can
145555f81639SShiyang Ruan  * insert the PTE/PMD into the page tables only after that fsync has happened.
145655f81639SShiyang Ruan  * Skip the insertion for now and return the pfn so that the caller can insert
145755f81639SShiyang Ruan  * it after the fsync is done.
145855f81639SShiyang Ruan  */
145955f81639SShiyang Ruan static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
146055f81639SShiyang Ruan {
146155f81639SShiyang Ruan 	if (WARN_ON_ONCE(!pfnp))
146255f81639SShiyang Ruan 		return VM_FAULT_SIGBUS;
146355f81639SShiyang Ruan 	*pfnp = pfn;
146455f81639SShiyang Ruan 	return VM_FAULT_NEEDDSYNC;
146555f81639SShiyang Ruan }
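
/*
 * The caller's fault handler is expected to notice VM_FAULT_NEEDDSYNC and
 * complete the fault with dax_finish_sync_fault() (see the end of this file),
 * which syncs the affected range and only then inserts the pfn returned here.
 */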
146655f81639SShiyang Ruan 
146765dd814aSChristoph Hellwig static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
146865dd814aSChristoph Hellwig 		const struct iomap_iter *iter)
146955f81639SShiyang Ruan {
147055f81639SShiyang Ruan 	vm_fault_t ret;
147155f81639SShiyang Ruan 	int error = 0;
147255f81639SShiyang Ruan 
147365dd814aSChristoph Hellwig 	switch (iter->iomap.type) {
147455f81639SShiyang Ruan 	case IOMAP_HOLE:
147555f81639SShiyang Ruan 	case IOMAP_UNWRITTEN:
1476429f8de7SChristoph Hellwig 		clear_user_highpage(vmf->cow_page, vmf->address);
147755f81639SShiyang Ruan 		break;
147855f81639SShiyang Ruan 	case IOMAP_MAPPED:
1479429f8de7SChristoph Hellwig 		error = copy_cow_page_dax(vmf, iter);
148055f81639SShiyang Ruan 		break;
148155f81639SShiyang Ruan 	default:
148255f81639SShiyang Ruan 		WARN_ON_ONCE(1);
148355f81639SShiyang Ruan 		error = -EIO;
148455f81639SShiyang Ruan 		break;
148555f81639SShiyang Ruan 	}
148655f81639SShiyang Ruan 
148755f81639SShiyang Ruan 	if (error)
148855f81639SShiyang Ruan 		return dax_fault_return(error);
148955f81639SShiyang Ruan 
149055f81639SShiyang Ruan 	__SetPageUptodate(vmf->cow_page);
149155f81639SShiyang Ruan 	ret = finish_fault(vmf);
149255f81639SShiyang Ruan 	if (!ret)
149355f81639SShiyang Ruan 		return VM_FAULT_DONE_COW;
149455f81639SShiyang Ruan 	return ret;
149555f81639SShiyang Ruan }
149655f81639SShiyang Ruan 
1497c2436190SShiyang Ruan /**
149865dd814aSChristoph Hellwig  * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
1499c2436190SShiyang Ruan  * @vmf:	vm fault instance
150065dd814aSChristoph Hellwig  * @iter:	iomap iter
1501c2436190SShiyang Ruan  * @pfnp:	pfn to be returned
1502c2436190SShiyang Ruan  * @xas:	the dax mapping tree of a file
1503c2436190SShiyang Ruan  * @entry:	an unlocked dax entry to be inserted
1504c2436190SShiyang Ruan  * @pmd:	true for a PMD fault, false for a PTE fault
1505c2436190SShiyang Ruan  */
150665dd814aSChristoph Hellwig static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
150765dd814aSChristoph Hellwig 		const struct iomap_iter *iter, pfn_t *pfnp,
150865dd814aSChristoph Hellwig 		struct xa_state *xas, void **entry, bool pmd)
1509c2436190SShiyang Ruan {
1510c2436190SShiyang Ruan 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
151165dd814aSChristoph Hellwig 	const struct iomap *iomap = &iter->iomap;
1512*ff17b8dfSShiyang Ruan 	const struct iomap *srcmap = &iter->srcmap;
1513c2436190SShiyang Ruan 	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
1514c2436190SShiyang Ruan 	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
1515c2436190SShiyang Ruan 	bool write = vmf->flags & FAULT_FLAG_WRITE;
151665dd814aSChristoph Hellwig 	bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap);
1517c2436190SShiyang Ruan 	unsigned long entry_flags = pmd ? DAX_PMD : 0;
1518c2436190SShiyang Ruan 	int err = 0;
1519c2436190SShiyang Ruan 	pfn_t pfn;
1520*ff17b8dfSShiyang Ruan 	void *kaddr;
1521c2436190SShiyang Ruan 
152265dd814aSChristoph Hellwig 	if (!pmd && vmf->cow_page)
152365dd814aSChristoph Hellwig 		return dax_fault_cow_page(vmf, iter);
152465dd814aSChristoph Hellwig 
1525c2436190SShiyang Ruan 	/* If we are reading from an UNWRITTEN or HOLE extent, return a hole. */
1526c2436190SShiyang Ruan 	if (!write &&
1527c2436190SShiyang Ruan 	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
1528c2436190SShiyang Ruan 		if (!pmd)
1529c2436190SShiyang Ruan 			return dax_load_hole(xas, mapping, entry, vmf);
1530c2436190SShiyang Ruan 		return dax_pmd_load_hole(xas, vmf, iomap, entry);
1531c2436190SShiyang Ruan 	}
1532c2436190SShiyang Ruan 
1533*ff17b8dfSShiyang Ruan 	if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
1534c2436190SShiyang Ruan 		WARN_ON_ONCE(1);
1535c2436190SShiyang Ruan 		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
1536c2436190SShiyang Ruan 	}
1537c2436190SShiyang Ruan 
1538*ff17b8dfSShiyang Ruan 	err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
1539c2436190SShiyang Ruan 	if (err)
1540c2436190SShiyang Ruan 		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
1541c2436190SShiyang Ruan 
1542c2436190SShiyang Ruan 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
1543c2436190SShiyang Ruan 				  write && !sync);
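	/*
	 * For a synchronous fault the entry above is inserted clean (the
	 * dirty argument is "write && !sync"); it is marked dirty later by
	 * dax_insert_pfn_mkwrite() once the caller has synced the range and
	 * the pfn is actually inserted into the page tables.
	 */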
1544c2436190SShiyang Ruan 
1545*ff17b8dfSShiyang Ruan 	if (write &&
1546*ff17b8dfSShiyang Ruan 	    srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
1547*ff17b8dfSShiyang Ruan 		err = dax_iomap_cow_copy(pos, size, size, srcmap, kaddr);
1548*ff17b8dfSShiyang Ruan 		if (err)
1549*ff17b8dfSShiyang Ruan 			return dax_fault_return(err);
1550*ff17b8dfSShiyang Ruan 	}
1551*ff17b8dfSShiyang Ruan 
1552c2436190SShiyang Ruan 	if (sync)
1553c2436190SShiyang Ruan 		return dax_fault_synchronous_pfnp(pfnp, pfn);
1554c2436190SShiyang Ruan 
1555c2436190SShiyang Ruan 	/* insert PMD pfn */
1556c2436190SShiyang Ruan 	if (pmd)
1557c2436190SShiyang Ruan 		return vmf_insert_pfn_pmd(vmf, pfn, write);
1558c2436190SShiyang Ruan 
1559c2436190SShiyang Ruan 	/* insert PTE pfn */
1560c2436190SShiyang Ruan 	if (write)
1561c2436190SShiyang Ruan 		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1562c2436190SShiyang Ruan 	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
1563c2436190SShiyang Ruan }
1564c2436190SShiyang Ruan 
1565ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1566c0b24625SJan Kara 			       int *iomap_errp, const struct iomap_ops *ops)
1567a7d73fe6SChristoph Hellwig {
156865dd814aSChristoph Hellwig 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1569b15cd800SMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
157065dd814aSChristoph Hellwig 	struct iomap_iter iter = {
157165dd814aSChristoph Hellwig 		.inode		= mapping->host,
157265dd814aSChristoph Hellwig 		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
157365dd814aSChristoph Hellwig 		.len		= PAGE_SIZE,
1574952da063SChristoph Hellwig 		.flags		= IOMAP_DAX | IOMAP_FAULT,
157565dd814aSChristoph Hellwig 	};
1576ab77dab4SSouptick Joarder 	vm_fault_t ret = 0;
1577a7d73fe6SChristoph Hellwig 	void *entry;
157865dd814aSChristoph Hellwig 	int error;
1579a7d73fe6SChristoph Hellwig 
158065dd814aSChristoph Hellwig 	trace_dax_pte_fault(iter.inode, vmf, ret);
1581a7d73fe6SChristoph Hellwig 	/*
1582a7d73fe6SChristoph Hellwig 	 * Check whether the offset is beyond the end of the file. The caller is
1583a7d73fe6SChristoph Hellwig 	 * supposed to hold locks serializing us with truncate / punch hole, so
1584a7d73fe6SChristoph Hellwig 	 * this is a reliable test.
1585a7d73fe6SChristoph Hellwig 	 */
158665dd814aSChristoph Hellwig 	if (iter.pos >= i_size_read(iter.inode)) {
1587ab77dab4SSouptick Joarder 		ret = VM_FAULT_SIGBUS;
1588a9c42b33SRoss Zwisler 		goto out;
1589a9c42b33SRoss Zwisler 	}
1590a7d73fe6SChristoph Hellwig 
159165dd814aSChristoph Hellwig 	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
159265dd814aSChristoph Hellwig 		iter.flags |= IOMAP_WRITE;
1593a7d73fe6SChristoph Hellwig 
1594b15cd800SMatthew Wilcox 	entry = grab_mapping_entry(&xas, mapping, 0);
1595b15cd800SMatthew Wilcox 	if (xa_is_internal(entry)) {
1596b15cd800SMatthew Wilcox 		ret = xa_to_internal(entry);
159713e451fdSJan Kara 		goto out;
159813e451fdSJan Kara 	}
159913e451fdSJan Kara 
1600a7d73fe6SChristoph Hellwig 	/*
1601e2093926SRoss Zwisler 	 * It is possible, particularly with mixed reads & writes to private
1602e2093926SRoss Zwisler 	 * mappings, that we have raced with a PMD fault that overlaps with
1603e2093926SRoss Zwisler 	 * the PTE we need to set up.  If so just return and the fault will be
1604e2093926SRoss Zwisler 	 * retried.
1605e2093926SRoss Zwisler 	 */
1606e2093926SRoss Zwisler 	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1607ab77dab4SSouptick Joarder 		ret = VM_FAULT_NOPAGE;
1608e2093926SRoss Zwisler 		goto unlock_entry;
1609e2093926SRoss Zwisler 	}
1610e2093926SRoss Zwisler 
161165dd814aSChristoph Hellwig 	while ((error = iomap_iter(&iter, ops)) > 0) {
161265dd814aSChristoph Hellwig 		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
161365dd814aSChristoph Hellwig 			iter.processed = -EIO;	/* fs corruption? */
161465dd814aSChristoph Hellwig 			continue;
161565dd814aSChristoph Hellwig 		}
161665dd814aSChristoph Hellwig 
161765dd814aSChristoph Hellwig 		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
161865dd814aSChristoph Hellwig 		if (ret != VM_FAULT_SIGBUS &&
161965dd814aSChristoph Hellwig 		    (iter.iomap.flags & IOMAP_F_NEW)) {
162065dd814aSChristoph Hellwig 			count_vm_event(PGMAJFAULT);
162165dd814aSChristoph Hellwig 			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
162265dd814aSChristoph Hellwig 			ret |= VM_FAULT_MAJOR;
162365dd814aSChristoph Hellwig 		}
162465dd814aSChristoph Hellwig 
162565dd814aSChristoph Hellwig 		if (!(ret & VM_FAULT_ERROR))
162665dd814aSChristoph Hellwig 			iter.processed = PAGE_SIZE;
162765dd814aSChristoph Hellwig 	}
162865dd814aSChristoph Hellwig 
1629c0b24625SJan Kara 	if (iomap_errp)
1630c0b24625SJan Kara 		*iomap_errp = error;
163165dd814aSChristoph Hellwig 	if (!ret && error)
1632ab77dab4SSouptick Joarder 		ret = dax_fault_return(error);
1633a7d73fe6SChristoph Hellwig 
163413e451fdSJan Kara unlock_entry:
1635b15cd800SMatthew Wilcox 	dax_unlock_entry(&xas, entry);
1636a9c42b33SRoss Zwisler out:
163765dd814aSChristoph Hellwig 	trace_dax_pte_fault_done(iter.inode, vmf, ret);
163865dd814aSChristoph Hellwig 	return ret;
1639a7d73fe6SChristoph Hellwig }
1640642261acSRoss Zwisler 
1641642261acSRoss Zwisler #ifdef CONFIG_FS_DAX_PMD
164255f81639SShiyang Ruan static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
164355f81639SShiyang Ruan 		pgoff_t max_pgoff)
1644642261acSRoss Zwisler {
1645d8a849e1SDave Jiang 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1646d8a849e1SDave Jiang 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1647282a8e03SRoss Zwisler 
1648fffa281bSRoss Zwisler 	/*
1649fffa281bSRoss Zwisler 	 * Make sure that the faulting address's PMD offset (color) matches
1650fffa281bSRoss Zwisler 	 * the PMD offset from the start of the file.  This is necessary so
1651fffa281bSRoss Zwisler 	 * that a PMD range in the page table overlaps exactly with a PMD
1652a77d19f4SMatthew Wilcox 	 * range in the page cache.
1653fffa281bSRoss Zwisler 	 */
1654fffa281bSRoss Zwisler 	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1655fffa281bSRoss Zwisler 	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
165655f81639SShiyang Ruan 		return true;
1657fffa281bSRoss Zwisler 
1658642261acSRoss Zwisler 	/* Fall back to PTEs if we're going to COW */
165955f81639SShiyang Ruan 	if (write && !(vmf->vma->vm_flags & VM_SHARED))
166055f81639SShiyang Ruan 		return true;
1661642261acSRoss Zwisler 
1662642261acSRoss Zwisler 	/* If the PMD would extend outside the VMA */
166355f81639SShiyang Ruan 	if (pmd_addr < vmf->vma->vm_start)
166455f81639SShiyang Ruan 		return true;
166555f81639SShiyang Ruan 	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
166655f81639SShiyang Ruan 		return true;
166755f81639SShiyang Ruan 
166855f81639SShiyang Ruan 	/* If the PMD would extend beyond the file size */
166955f81639SShiyang Ruan 	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
167055f81639SShiyang Ruan 		return true;
167155f81639SShiyang Ruan 
167255f81639SShiyang Ruan 	return false;
167355f81639SShiyang Ruan }
167455f81639SShiyang Ruan 
1675642261acSRoss Zwisler static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1676642261acSRoss Zwisler 			       const struct iomap_ops *ops)
1677642261acSRoss Zwisler {
167865dd814aSChristoph Hellwig 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1679642261acSRoss Zwisler 	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
168065dd814aSChristoph Hellwig 	struct iomap_iter iter = {
168165dd814aSChristoph Hellwig 		.inode		= mapping->host,
168265dd814aSChristoph Hellwig 		.len		= PMD_SIZE,
1683952da063SChristoph Hellwig 		.flags		= IOMAP_DAX | IOMAP_FAULT,
168465dd814aSChristoph Hellwig 	};
1685c2436190SShiyang Ruan 	vm_fault_t ret = VM_FAULT_FALLBACK;
1686642261acSRoss Zwisler 	pgoff_t max_pgoff;
1687642261acSRoss Zwisler 	void *entry;
1688642261acSRoss Zwisler 	int error;
1689642261acSRoss Zwisler 
169065dd814aSChristoph Hellwig 	if (vmf->flags & FAULT_FLAG_WRITE)
169165dd814aSChristoph Hellwig 		iter.flags |= IOMAP_WRITE;
169265dd814aSChristoph Hellwig 
1693642261acSRoss Zwisler 	/*
1694642261acSRoss Zwisler 	 * Check whether the offset is beyond the end of the file. The caller
1695642261acSRoss Zwisler 	 * is supposed to hold locks serializing us with truncate / punch hole,
1696642261acSRoss Zwisler 	 * so this is a reliable test.
1697642261acSRoss Zwisler 	 */
169865dd814aSChristoph Hellwig 	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
1699642261acSRoss Zwisler 
170065dd814aSChristoph Hellwig 	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
1701642261acSRoss Zwisler 
1702b15cd800SMatthew Wilcox 	if (xas.xa_index >= max_pgoff) {
1703c2436190SShiyang Ruan 		ret = VM_FAULT_SIGBUS;
1704282a8e03SRoss Zwisler 		goto out;
1705282a8e03SRoss Zwisler 	}
1706642261acSRoss Zwisler 
170755f81639SShiyang Ruan 	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
1708642261acSRoss Zwisler 		goto fallback;
1709642261acSRoss Zwisler 
1710642261acSRoss Zwisler 	/*
1711b15cd800SMatthew Wilcox 	 * grab_mapping_entry() will make sure we get an empty PMD entry,
1712b15cd800SMatthew Wilcox 	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
1713b15cd800SMatthew Wilcox 	 * entry is already in the array, for instance), it will return
1714b15cd800SMatthew Wilcox 	 * VM_FAULT_FALLBACK.
17159f141d6eSJan Kara 	 */
171623c84eb7SMatthew Wilcox (Oracle) 	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1717b15cd800SMatthew Wilcox 	if (xa_is_internal(entry)) {
1718c2436190SShiyang Ruan 		ret = xa_to_internal(entry);
1719876f2946SRoss Zwisler 		goto fallback;
1720b15cd800SMatthew Wilcox 	}
1721876f2946SRoss Zwisler 
1722876f2946SRoss Zwisler 	/*
1723e2093926SRoss Zwisler 	 * It is possible, particularly with mixed reads & writes to private
1724e2093926SRoss Zwisler 	 * mappings, that we have raced with a PTE fault that overlaps with
1725e2093926SRoss Zwisler 	 * the PMD we need to set up.  If so just return and the fault will be
1726e2093926SRoss Zwisler 	 * retried.
1727e2093926SRoss Zwisler 	 */
1728e2093926SRoss Zwisler 	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1729e2093926SRoss Zwisler 			!pmd_devmap(*vmf->pmd)) {
1730c2436190SShiyang Ruan 		ret = 0;
1731e2093926SRoss Zwisler 		goto unlock_entry;
1732e2093926SRoss Zwisler 	}
1733e2093926SRoss Zwisler 
173465dd814aSChristoph Hellwig 	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
173565dd814aSChristoph Hellwig 	while ((error = iomap_iter(&iter, ops)) > 0) {
173665dd814aSChristoph Hellwig 		if (iomap_length(&iter) < PMD_SIZE)
173765dd814aSChristoph Hellwig 			continue; /* actually breaks out of the loop */
1738876f2946SRoss Zwisler 
173965dd814aSChristoph Hellwig 		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
174065dd814aSChristoph Hellwig 		if (ret != VM_FAULT_FALLBACK)
174165dd814aSChristoph Hellwig 			iter.processed = PMD_SIZE;
1742caa51d26SJan Kara 	}
1743caa51d26SJan Kara 
1744876f2946SRoss Zwisler unlock_entry:
1745b15cd800SMatthew Wilcox 	dax_unlock_entry(&xas, entry);
1746642261acSRoss Zwisler fallback:
1747c2436190SShiyang Ruan 	if (ret == VM_FAULT_FALLBACK) {
174865dd814aSChristoph Hellwig 		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
1749642261acSRoss Zwisler 		count_vm_event(THP_FAULT_FALLBACK);
1750642261acSRoss Zwisler 	}
1751282a8e03SRoss Zwisler out:
175265dd814aSChristoph Hellwig 	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
1753c2436190SShiyang Ruan 	return ret;
1754642261acSRoss Zwisler }
1755a2d58167SDave Jiang #else
1756ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
175701cddfe9SArnd Bergmann 			       const struct iomap_ops *ops)
1758a2d58167SDave Jiang {
1759a2d58167SDave Jiang 	return VM_FAULT_FALLBACK;
1760a2d58167SDave Jiang }
1761642261acSRoss Zwisler #endif /* CONFIG_FS_DAX_PMD */
1762a2d58167SDave Jiang 
1763a2d58167SDave Jiang /**
1764a2d58167SDave Jiang  * dax_iomap_fault - handle a page fault on a DAX file
1765a2d58167SDave Jiang  * @vmf: The description of the fault
1766cec04e8cSJan Kara  * @pe_size: Size of the page to fault in
17679a0dd422SJan Kara  * @pfnp: PFN to insert for synchronous faults if fsync is required
1768c0b24625SJan Kara  * @iomap_errp: Storage for detailed error code in case of error
1769cec04e8cSJan Kara  * @ops: Iomap ops passed from the file system
1770a2d58167SDave Jiang  *
1771a2d58167SDave Jiang  * When a page fault occurs, filesystems may call this helper in
1772a2d58167SDave Jiang  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1773a2d58167SDave Jiang  * has done all the necessary locking for page fault to proceed
1774a2d58167SDave Jiang  * has done all the necessary locking for the page fault to proceed successfully.
1775a2d58167SDave Jiang  */
1776ab77dab4SSouptick Joarder vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1777c0b24625SJan Kara 		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1778a2d58167SDave Jiang {
1779c791ace1SDave Jiang 	switch (pe_size) {
1780c791ace1SDave Jiang 	case PE_SIZE_PTE:
1781c0b24625SJan Kara 		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1782c791ace1SDave Jiang 	case PE_SIZE_PMD:
17839a0dd422SJan Kara 		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1784a2d58167SDave Jiang 	default:
1785a2d58167SDave Jiang 		return VM_FAULT_FALLBACK;
1786a2d58167SDave Jiang 	}
1787a2d58167SDave Jiang }
1788a2d58167SDave Jiang EXPORT_SYMBOL_GPL(dax_iomap_fault);
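
/*
 * Illustrative only (not part of this file): a filesystem fault handler
 * typically wires dax_iomap_fault() and dax_finish_sync_fault() (below)
 * together roughly like this, with foo_iomap_ops as a placeholder for the
 * filesystem's iomap ops:
 *
 *	static vm_fault_t foo_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 *
 * Real filesystems also take the lock that serializes against truncate around
 * the call and use sb_start_pagefault()/sb_end_pagefault() for write faults.
 */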
178971eab6dfSJan Kara 
1790a77d19f4SMatthew Wilcox /*
179171eab6dfSJan Kara  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
179271eab6dfSJan Kara  * @vmf: The description of the fault
179371eab6dfSJan Kara  * @pfn: PFN to insert
1794cfc93c6cSMatthew Wilcox  * @order: Order of entry to insert.
179571eab6dfSJan Kara  *
1796a77d19f4SMatthew Wilcox  * This function inserts a writeable PTE or PMD entry into the page tables
1797a77d19f4SMatthew Wilcox  * for an mmapped DAX file.  It also marks the page cache entry as dirty.
179871eab6dfSJan Kara  */
1799cfc93c6cSMatthew Wilcox static vm_fault_t
1800cfc93c6cSMatthew Wilcox dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
180171eab6dfSJan Kara {
180271eab6dfSJan Kara 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1803cfc93c6cSMatthew Wilcox 	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1804cfc93c6cSMatthew Wilcox 	void *entry;
1805ab77dab4SSouptick Joarder 	vm_fault_t ret;
180671eab6dfSJan Kara 
1807cfc93c6cSMatthew Wilcox 	xas_lock_irq(&xas);
180823c84eb7SMatthew Wilcox (Oracle) 	entry = get_unlocked_entry(&xas, order);
180971eab6dfSJan Kara 	/* Did we race with someone splitting entry or so? */
181023c84eb7SMatthew Wilcox (Oracle) 	if (!entry || dax_is_conflict(entry) ||
181123c84eb7SMatthew Wilcox (Oracle) 	    (order == 0 && !dax_is_pte_entry(entry))) {
18124c3d043dSVivek Goyal 		put_unlocked_entry(&xas, entry, WAKE_NEXT);
1813cfc93c6cSMatthew Wilcox 		xas_unlock_irq(&xas);
181471eab6dfSJan Kara 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
181571eab6dfSJan Kara 						      VM_FAULT_NOPAGE);
181671eab6dfSJan Kara 		return VM_FAULT_NOPAGE;
181771eab6dfSJan Kara 	}
1818cfc93c6cSMatthew Wilcox 	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1819cfc93c6cSMatthew Wilcox 	dax_lock_entry(&xas, entry);
1820cfc93c6cSMatthew Wilcox 	xas_unlock_irq(&xas);
1821cfc93c6cSMatthew Wilcox 	if (order == 0)
1822ab77dab4SSouptick Joarder 		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
182371eab6dfSJan Kara #ifdef CONFIG_FS_DAX_PMD
1824cfc93c6cSMatthew Wilcox 	else if (order == PMD_ORDER)
1825fce86ff5SDan Williams 		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
182671eab6dfSJan Kara #endif
1827cfc93c6cSMatthew Wilcox 	else
1828ab77dab4SSouptick Joarder 		ret = VM_FAULT_FALLBACK;
1829cfc93c6cSMatthew Wilcox 	dax_unlock_entry(&xas, entry);
1830ab77dab4SSouptick Joarder 	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1831ab77dab4SSouptick Joarder 	return ret;
183271eab6dfSJan Kara }
183371eab6dfSJan Kara 
183471eab6dfSJan Kara /**
183571eab6dfSJan Kara  * dax_finish_sync_fault - finish synchronous page fault
183671eab6dfSJan Kara  * @vmf: The description of the fault
183771eab6dfSJan Kara  * @pe_size: Size of entry to be inserted
183871eab6dfSJan Kara  * @pfn: PFN to insert
183971eab6dfSJan Kara  *
184071eab6dfSJan Kara  * This function ensures that the file range touched by the page fault is
184171eab6dfSJan Kara  * stored persistently on the media and handles inserting of appropriate page
184271eab6dfSJan Kara  * table entry.
184371eab6dfSJan Kara  */
1844ab77dab4SSouptick Joarder vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1845ab77dab4SSouptick Joarder 		enum page_entry_size pe_size, pfn_t pfn)
184671eab6dfSJan Kara {
184771eab6dfSJan Kara 	int err;
184871eab6dfSJan Kara 	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1849cfc93c6cSMatthew Wilcox 	unsigned int order = pe_order(pe_size);
1850cfc93c6cSMatthew Wilcox 	size_t len = PAGE_SIZE << order;
185171eab6dfSJan Kara 
185271eab6dfSJan Kara 	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
185371eab6dfSJan Kara 	if (err)
185471eab6dfSJan Kara 		return VM_FAULT_SIGBUS;
1855cfc93c6cSMatthew Wilcox 	return dax_insert_pfn_mkwrite(vmf, pfn, order);
185671eab6dfSJan Kara }
185771eab6dfSJan Kara EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
1858