/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset.  */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * We use the lowest available bit in an exceptional entry for locking, one
 * bit for the entry size (PMD) and two more to tell us if the entry is a
 * zero page or an empty entry that is just used for locking.  In total four
 * special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
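
/*
 * A worked example of the encoding above (a sketch, assuming the usual
 * RADIX_TREE_EXCEPTIONAL_SHIFT of 2 from <linux/radix-tree.h>): a locked
 * PTE-sized entry backed by pfn 0x1000 would be built as
 *
 *	entry = dax_radix_locked_entry(0x1000, 0);
 *	     == (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY	// marks exceptional
 *			| (0x1000UL << RADIX_DAX_SHIFT)	// pfn in high bits
 *			| RADIX_DAX_ENTRY_LOCK);	// locked at creation
 *
 * and dax_radix_pfn(entry) recovers 0x1000 by shifting the flag bits away.
 */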

static unsigned long dax_radix_pfn(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
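
/*
 * For example, with 2MiB PMDs and 4k pages (x86-64), PG_PMD_COLOUR is 511,
 * so a fault on any of page offsets 512-1023 of a PMD entry hashes with
 * entry_start == 512: all 512 offsets share one waitqueue and one wakeup,
 * which is what lets a PTE-sized fault wait on a PMD entry's lock bit.
 * (Illustrative numbers, not spelled out elsewhere in this file.)
 */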

static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked.  Must be called with the i_pages
 * lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  Must be called with the i_pages lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree and, if it is an exceptional entry,
 * wait for it to become unlocked before returning it.  The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and later wants
 * to unlock it.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
					  &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xa_unlock_irq(&mapping->i_pages);
		schedule();
		finish_wait(wq, &ewait.wait);
		xa_lock_irq(&mapping->i_pages);
	}
}
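
/*
 * The locking protocol above is used by the callers below roughly like
 * this (a sketch, not a callable path in this file; need_to_modify() is a
 * hypothetical predicate standing in for caller-specific logic):
 *
 *	xa_lock_irq(&mapping->i_pages);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	if (need_to_modify(entry))
 *		entry = lock_slot(mapping, slot);	// work on it locked
 *	else
 *		put_unlocked_mapping_entry(mapping, index, entry);
 *	xa_unlock_irq(&mapping->i_pages);
 *	...
 *	put_locked_mapping_entry(mapping, index);	// if we locked it
 */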

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	xa_lock_irq(&mapping->i_pages);
	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		xa_unlock_irq(&mapping->i_pages);
		return;
	}
	unlock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up the next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_radix_end_pfn(void *entry)
{
	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_radix_pfn(entry); \
			pfn < dax_radix_end_pfn(entry); pfn++)
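
/*
 * For a PMD entry on x86-64 this walks 512 consecutive pfns; for a PTE
 * entry exactly one; zero and empty entries contribute no pfns at all,
 * since dax_entry_size() is 0 for them.  E.g. (sketch):
 *
 *	unsigned long pfn;
 *	for_each_mapped_pfn(entry, pfn)
 *		do_something(pfn_to_page(pfn));	// do_something() is made up
 */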

static void dax_associate_entry(void *entry, struct address_space *mapping)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * Find the radix tree entry at the given index. If it points to an
 * exceptional entry, return it with the radix tree entry locked. If the
 * radix tree doesn't contain the given index, create an empty exceptional
 * entry for the index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * the i_pages lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		xa_unlock_irq(&mapping->i_pages);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
							PG_PMD_NR, false);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		xa_lock_irq(&mapping->i_pages);

		if (!entry) {
			/*
			 * We needed to drop the i_pages lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->i_pages, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				xa_unlock_irq(&mapping->i_pages);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			dax_disassociate_entry(entry, mapping, false);
			radix_tree_delete(&mapping->i_pages, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->i_pages, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			xa_unlock_irq(&mapping->i_pages);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		xa_unlock_irq(&mapping->i_pages);
		return entry;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	xa_unlock_irq(&mapping->i_pages);
	return entry;
}
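
/*
 * For example, a PMD fault path would call
 * grab_mapping_entry(mapping, index, RADIX_DAX_PMD) and fall back to PTEs
 * on ERR_PTR(-EEXIST), while a PTE fault passes 0 for size_flag and may
 * transparently downgrade a zero/empty PMD entry as described above.
 */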

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	pgoff_t	indices[PAGEVEC_SIZE];
	struct page *page = NULL;
	struct pagevec pvec;
	pgoff_t	index, end;
	unsigned i;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	pagevec_init(&pvec);
	index = 0;
	end = -1;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the pagevec_lookup and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *pvec_ent = pvec.pages[i];
			void *entry;

			index = indices[i];
			if (index >= end)
				break;

			if (!radix_tree_exceptional_entry(pvec_ent))
				continue;

			xa_lock_irq(&mapping->i_pages);
			entry = get_unlocked_mapping_entry(mapping, index, NULL);
			if (entry)
				page = dax_busy_page(entry);
			put_unlocked_mapping_entry(mapping, index, entry);
			xa_unlock_irq(&mapping->i_pages);
			if (page)
				break;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;

		if (page)
			break;
	}
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
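
/*
 * A filesystem typically calls this in a loop before an operation that
 * changes the block map, e.g. (a sketch; fs_wait_for_page_idle() is a
 * hypothetical stand-in for whatever wait primitive the filesystem uses):
 *
 *	struct page *page;
 *
 *	while ((page = dax_layout_busy_page(inode->i_mapping))) {
 *		error = fs_wait_for_page_idle(page);
 *		if (error)
 *			return error;
 *	}
 */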

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *pages = &mapping->i_pages;

	xa_lock_irq(pages);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	radix_tree_delete(pages, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	xa_unlock_irq(pages);
	return ret;
}
/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen an exceptional entry for this index, we had better
	 * find it at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate the exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, pfn_t pfn_t,
				      unsigned long flags, bool dirty)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	unsigned long pfn = pfn_t_to_pfn(pfn_t);
	pgoff_t index = vmf->pgoff;
	void *new_entry;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
							PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
	}

	xa_lock_irq(pages);
	new_entry = dax_radix_locked_entry(pfn, flags);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(pages, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(pages, node, slot,
				     new_entry, NULL);
		entry = new_entry;
	}

	if (dirty)
		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);

	xa_unlock_irq(pages);
	return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}
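
/*
 * E.g. for a VMA with vm_start = 0x7f0000000000 and vm_pgoff = 16, file
 * page 20 lives at 0x7f0000000000 + (20 - 16) * 4096 = 0x7f0000004000
 * (illustrative values, 4k pages assumed).
 */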

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct dax_device *dax_dev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	void *entry2, **slot;
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in a DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	xa_lock_irq(pages);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare pfns as we must not bail out due to a difference in the
	 * lockbit or entry type.
	 */
	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
	xa_unlock_irq(pages);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_radix_pfn(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	dax_mapping_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xa_lock_irq(pages);
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(pages);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
	put_locked_mapping_entry(mapping, index);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	xa_unlock_irq(pages);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(dax_dev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
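
/*
 * A filesystem's ->writepages method for a DAX inode is typically just a
 * thin wrapper around this, along the lines of (a sketch; how the backing
 * bdev is looked up is filesystem-specific):
 *
 *	static int fs_dax_writepages(struct address_space *mapping,
 *				     struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */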

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}
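
/*
 * E.g. if an extent mapped at file offset 4096 (iomap->offset) starts at
 * disk byte 1048576 (iomap->addr), then pos = 8192 resolves to disk byte
 * 1048576 + 8192 - 4096 = 1052672, i.e. 512-byte sector 2056
 * (illustrative values).
 */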

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	pfn_t pfn;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	pfn = page_to_pfn_t(zero_page);
	dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
			false);
	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
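
/*
 * Example: with 512-byte logical blocks, zeroing bytes [512, 1536) of a
 * page is sector-aligned and goes through blkdev_issue_zeroout(), while
 * zeroing bytes [100, 200) is not, so it falls back to dax_direct_access()
 * plus memset()/dax_flush().  (Illustrative offsets only.)
 */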
1071679c8bd3SChristoph Hellwig 
1072a254e568SChristoph Hellwig static loff_t
107311c59c92SRoss Zwisler dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1074a254e568SChristoph Hellwig 		struct iomap *iomap)
1075a254e568SChristoph Hellwig {
1076cccbce67SDan Williams 	struct block_device *bdev = iomap->bdev;
1077cccbce67SDan Williams 	struct dax_device *dax_dev = iomap->dax_dev;
1078a254e568SChristoph Hellwig 	struct iov_iter *iter = data;
1079a254e568SChristoph Hellwig 	loff_t end = pos + length, done = 0;
1080a254e568SChristoph Hellwig 	ssize_t ret = 0;
1081a77d4786SDan Williams 	size_t xfer;
1082cccbce67SDan Williams 	int id;
1083a254e568SChristoph Hellwig 
1084a254e568SChristoph Hellwig 	if (iov_iter_rw(iter) == READ) {
1085a254e568SChristoph Hellwig 		end = min(end, i_size_read(inode));
1086a254e568SChristoph Hellwig 		if (pos >= end)
1087a254e568SChristoph Hellwig 			return 0;
1088a254e568SChristoph Hellwig 
1089a254e568SChristoph Hellwig 		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1090a254e568SChristoph Hellwig 			return iov_iter_zero(min(length, end - pos), iter);
1091a254e568SChristoph Hellwig 	}
1092a254e568SChristoph Hellwig 
1093a254e568SChristoph Hellwig 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1094a254e568SChristoph Hellwig 		return -EIO;
1095a254e568SChristoph Hellwig 
1096e3fce68cSJan Kara 	/*
1097e3fce68cSJan Kara 	 * A write can allocate a block for an area which has a hole page
1098e3fce68cSJan Kara 	 * mapped into page tables. We have to tear down these mappings so
1099e3fce68cSJan Kara 	 * that data written by write(2) is visible via mmap.
1100e3fce68cSJan Kara 	 */
1101cd656375SJan Kara 	if (iomap->flags & IOMAP_F_NEW) {
1102e3fce68cSJan Kara 		invalidate_inode_pages2_range(inode->i_mapping,
1103e3fce68cSJan Kara 					      pos >> PAGE_SHIFT,
1104e3fce68cSJan Kara 					      (end - 1) >> PAGE_SHIFT);
1105e3fce68cSJan Kara 	}
1106e3fce68cSJan Kara 
1107cccbce67SDan Williams 	id = dax_read_lock();
1108a254e568SChristoph Hellwig 	while (pos < end) {
1109a254e568SChristoph Hellwig 		unsigned offset = pos & (PAGE_SIZE - 1);
1110cccbce67SDan Williams 		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1111cccbce67SDan Williams 		const sector_t sector = dax_iomap_sector(iomap, pos);
1112a254e568SChristoph Hellwig 		ssize_t map_len;
1113cccbce67SDan Williams 		pgoff_t pgoff;
1114cccbce67SDan Williams 		void *kaddr;
1115a254e568SChristoph Hellwig 
1116d1908f52SMichal Hocko 		if (fatal_signal_pending(current)) {
1117d1908f52SMichal Hocko 			ret = -EINTR;
1118d1908f52SMichal Hocko 			break;
1119d1908f52SMichal Hocko 		}
1120d1908f52SMichal Hocko 
1121cccbce67SDan Williams 		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1122cccbce67SDan Williams 		if (ret)
1123cccbce67SDan Williams 			break;
1124cccbce67SDan Williams 
1125cccbce67SDan Williams 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
112686ed913bSHuaisheng Ye 				&kaddr, NULL);
1127a254e568SChristoph Hellwig 		if (map_len < 0) {
1128a254e568SChristoph Hellwig 			ret = map_len;
1129a254e568SChristoph Hellwig 			break;
1130a254e568SChristoph Hellwig 		}
1131a254e568SChristoph Hellwig 
1132cccbce67SDan Williams 		map_len = PFN_PHYS(map_len);
1133cccbce67SDan Williams 		kaddr += offset;
1134a254e568SChristoph Hellwig 		map_len -= offset;
1135a254e568SChristoph Hellwig 		if (map_len > end - pos)
1136a254e568SChristoph Hellwig 			map_len = end - pos;
1137a254e568SChristoph Hellwig 
1138a2e050f5SRoss Zwisler 		/*
1139a2e050f5SRoss Zwisler 		 * The userspace address for the memory copy has already been
1140a2e050f5SRoss Zwisler 		 * validated via access_ok() in either vfs_read() or
1141a2e050f5SRoss Zwisler 		 * vfs_write(), depending on which operation we are doing.
1142a2e050f5SRoss Zwisler 		 */
1143a254e568SChristoph Hellwig 		if (iov_iter_rw(iter) == WRITE)
1144a77d4786SDan Williams 			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1145fec53774SDan Williams 					map_len, iter);
1146a254e568SChristoph Hellwig 		else
1147a77d4786SDan Williams 			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1148b3a9a0c3SDan Williams 					map_len, iter);
1149a254e568SChristoph Hellwig 
1150a77d4786SDan Williams 		pos += xfer;
1151a77d4786SDan Williams 		length -= xfer;
1152a77d4786SDan Williams 		done += xfer;
1153a77d4786SDan Williams 
1154a77d4786SDan Williams 		if (xfer == 0)
1155a77d4786SDan Williams 			ret = -EFAULT;
1156a77d4786SDan Williams 		if (xfer < map_len)
1157a77d4786SDan Williams 			break;
1158a254e568SChristoph Hellwig 	}
1159cccbce67SDan Williams 	dax_read_unlock(id);
1160a254e568SChristoph Hellwig 
1161a254e568SChristoph Hellwig 	return done ? done : ret;
1162a254e568SChristoph Hellwig }
1163a254e568SChristoph Hellwig 
1164a254e568SChristoph Hellwig /**
116511c59c92SRoss Zwisler  * dax_iomap_rw - Perform I/O to a DAX file
1166a254e568SChristoph Hellwig  * @iocb:	The control block for this I/O
1167a254e568SChristoph Hellwig  * @iter:	The addresses to do I/O from or to
1168a254e568SChristoph Hellwig  * @ops:	iomap ops passed from the file system
1169a254e568SChristoph Hellwig  *
1170a254e568SChristoph Hellwig  * This function performs read and write operations to directly mapped
1171a254e568SChristoph Hellwig  * persistent memory.  The caller needs to take care of read/write exclusion
1172a254e568SChristoph Hellwig  * and evicting any page cache pages in the region under I/O.
1173a254e568SChristoph Hellwig  */
1174a254e568SChristoph Hellwig ssize_t
117511c59c92SRoss Zwisler dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
11768ff6daa1SChristoph Hellwig 		const struct iomap_ops *ops)
1177a254e568SChristoph Hellwig {
1178a254e568SChristoph Hellwig 	struct address_space *mapping = iocb->ki_filp->f_mapping;
1179a254e568SChristoph Hellwig 	struct inode *inode = mapping->host;
1180a254e568SChristoph Hellwig 	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1181a254e568SChristoph Hellwig 	unsigned flags = 0;
1182a254e568SChristoph Hellwig 
1183168316dbSChristoph Hellwig 	if (iov_iter_rw(iter) == WRITE) {
1184168316dbSChristoph Hellwig 		lockdep_assert_held_exclusive(&inode->i_rwsem);
1185a254e568SChristoph Hellwig 		flags |= IOMAP_WRITE;
1186168316dbSChristoph Hellwig 	} else {
1187168316dbSChristoph Hellwig 		lockdep_assert_held(&inode->i_rwsem);
1188168316dbSChristoph Hellwig 	}
1189a254e568SChristoph Hellwig 
1190a254e568SChristoph Hellwig 	while (iov_iter_count(iter)) {
1191a254e568SChristoph Hellwig 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
119211c59c92SRoss Zwisler 				iter, dax_iomap_actor);
1193a254e568SChristoph Hellwig 		if (ret <= 0)
1194a254e568SChristoph Hellwig 			break;
1195a254e568SChristoph Hellwig 		pos += ret;
1196a254e568SChristoph Hellwig 		done += ret;
1197a254e568SChristoph Hellwig 	}
1198a254e568SChristoph Hellwig 
1199a254e568SChristoph Hellwig 	iocb->ki_pos += done;
1200a254e568SChristoph Hellwig 	return done ? done : ret;
1201a254e568SChristoph Hellwig }
120211c59c92SRoss Zwisler EXPORT_SYMBOL_GPL(dax_iomap_rw);
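
/*
 * Illustrative sketch, not part of this file: a filesystem's ->read_iter()
 * usually provides the required exclusion by taking the inode lock shared
 * around the call.  example_iomap_ops is a placeholder for the
 * filesystem's own iomap_ops, not a real symbol.
 */
#if 0	/* example only */
static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif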
1203a7d73fe6SChristoph Hellwig 
1204ab77dab4SSouptick Joarder static vm_fault_t dax_fault_return(int error)
12059f141d6eSJan Kara {
12069f141d6eSJan Kara 	if (error == 0)
12079f141d6eSJan Kara 		return VM_FAULT_NOPAGE;
12089f141d6eSJan Kara 	if (error == -ENOMEM)
12099f141d6eSJan Kara 		return VM_FAULT_OOM;
12109f141d6eSJan Kara 	return VM_FAULT_SIGBUS;
12119f141d6eSJan Kara }
12129f141d6eSJan Kara 
1213aaa422c4SDan Williams /*
1214aaa422c4SDan Williams  * MAP_SYNC on a DAX mapping guarantees dirty metadata is
1215aaa422c4SDan Williams  * flushed on write-faults (non-cow), but not read-faults.
1216aaa422c4SDan Williams  */
1217aaa422c4SDan Williams static bool dax_fault_is_synchronous(unsigned long flags,
1218aaa422c4SDan Williams 		struct vm_area_struct *vma, struct iomap *iomap)
1219aaa422c4SDan Williams {
1220aaa422c4SDan Williams 	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1221aaa422c4SDan Williams 		&& (iomap->flags & IOMAP_F_DIRTY);
1222aaa422c4SDan Williams }
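
/*
 * Illustrative sketch, not part of this file: userspace opts in to
 * synchronous faults by mapping a DAX file with MAP_SYNC, which the
 * kernel only honours together with MAP_SHARED_VALIDATE:
 *
 *	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * After a write fault completes on such a mapping, the metadata needed
 * to reach the data blocks is durable, so userspace can persist its
 * stores with CPU cache flushes alone, without a further fsync().
 */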
1223aaa422c4SDan Williams 
1224ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1225c0b24625SJan Kara 			       int *iomap_errp, const struct iomap_ops *ops)
1226a7d73fe6SChristoph Hellwig {
1227a0987ad5SJan Kara 	struct vm_area_struct *vma = vmf->vma;
1228a0987ad5SJan Kara 	struct address_space *mapping = vma->vm_file->f_mapping;
1229a7d73fe6SChristoph Hellwig 	struct inode *inode = mapping->host;
12301a29d85eSJan Kara 	unsigned long vaddr = vmf->address;
1231a7d73fe6SChristoph Hellwig 	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1232a7d73fe6SChristoph Hellwig 	struct iomap iomap = { 0 };
12339484ab1bSJan Kara 	unsigned flags = IOMAP_FAULT;
1234a7d73fe6SChristoph Hellwig 	int error, major = 0;
1235d2c43ef1SJan Kara 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1236caa51d26SJan Kara 	bool sync;
1237ab77dab4SSouptick Joarder 	vm_fault_t ret = 0;
1238a7d73fe6SChristoph Hellwig 	void *entry;
12391b5a1cb2SJan Kara 	pfn_t pfn;
1240a7d73fe6SChristoph Hellwig 
1241ab77dab4SSouptick Joarder 	trace_dax_pte_fault(inode, vmf, ret);
1242a7d73fe6SChristoph Hellwig 	/*
1243a7d73fe6SChristoph Hellwig 	 * Check whether the offset is beyond the end of the file now. The
1244a7d73fe6SChristoph Hellwig 	 * caller is supposed to hold locks serializing us with truncate /
1245a7d73fe6SChristoph Hellwig 	 * punch hole, so this is a reliable test.
1246a7d73fe6SChristoph Hellwig 	 */
1247a9c42b33SRoss Zwisler 	if (pos >= i_size_read(inode)) {
1248ab77dab4SSouptick Joarder 		ret = VM_FAULT_SIGBUS;
1249a9c42b33SRoss Zwisler 		goto out;
1250a9c42b33SRoss Zwisler 	}
1251a7d73fe6SChristoph Hellwig 
1252d2c43ef1SJan Kara 	if (write && !vmf->cow_page)
1253a7d73fe6SChristoph Hellwig 		flags |= IOMAP_WRITE;
1254a7d73fe6SChristoph Hellwig 
125513e451fdSJan Kara 	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
125613e451fdSJan Kara 	if (IS_ERR(entry)) {
1257ab77dab4SSouptick Joarder 		ret = dax_fault_return(PTR_ERR(entry));
125813e451fdSJan Kara 		goto out;
125913e451fdSJan Kara 	}
126013e451fdSJan Kara 
1261a7d73fe6SChristoph Hellwig 	/*
1262e2093926SRoss Zwisler 	 * It is possible, particularly with mixed reads & writes to private
1263e2093926SRoss Zwisler 	 * mappings, that we have raced with a PMD fault that overlaps with
1264e2093926SRoss Zwisler 	 * the PTE we need to set up.  If so, just return and the fault will be
1265e2093926SRoss Zwisler 	 * retried.
1266e2093926SRoss Zwisler 	 */
1267e2093926SRoss Zwisler 	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1268ab77dab4SSouptick Joarder 		ret = VM_FAULT_NOPAGE;
1269e2093926SRoss Zwisler 		goto unlock_entry;
1270e2093926SRoss Zwisler 	}
1271e2093926SRoss Zwisler 
1272e2093926SRoss Zwisler 	/*
1273a7d73fe6SChristoph Hellwig 	 * Note that we don't bother to use iomap_apply here: DAX requires
1274a7d73fe6SChristoph Hellwig 	 * the file system block size to be equal to the page size, which means
1275a7d73fe6SChristoph Hellwig 	 * that we never have to deal with more than a single extent here.
1276a7d73fe6SChristoph Hellwig 	 */
1277a7d73fe6SChristoph Hellwig 	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1278c0b24625SJan Kara 	if (iomap_errp)
1279c0b24625SJan Kara 		*iomap_errp = error;
1280a9c42b33SRoss Zwisler 	if (error) {
1281ab77dab4SSouptick Joarder 		ret = dax_fault_return(error);
128213e451fdSJan Kara 		goto unlock_entry;
1283a9c42b33SRoss Zwisler 	}
1284a7d73fe6SChristoph Hellwig 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
128513e451fdSJan Kara 		error = -EIO;	/* fs corruption? */
128613e451fdSJan Kara 		goto error_finish_iomap;
1287a7d73fe6SChristoph Hellwig 	}
1288a7d73fe6SChristoph Hellwig 
1289a7d73fe6SChristoph Hellwig 	if (vmf->cow_page) {
129031a6f1a6SJan Kara 		sector_t sector = dax_iomap_sector(&iomap, pos);
129131a6f1a6SJan Kara 
1292a7d73fe6SChristoph Hellwig 		switch (iomap.type) {
1293a7d73fe6SChristoph Hellwig 		case IOMAP_HOLE:
1294a7d73fe6SChristoph Hellwig 		case IOMAP_UNWRITTEN:
1295a7d73fe6SChristoph Hellwig 			clear_user_highpage(vmf->cow_page, vaddr);
1296a7d73fe6SChristoph Hellwig 			break;
1297a7d73fe6SChristoph Hellwig 		case IOMAP_MAPPED:
1298cccbce67SDan Williams 			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1299cccbce67SDan Williams 					sector, PAGE_SIZE, vmf->cow_page, vaddr);
1300a7d73fe6SChristoph Hellwig 			break;
1301a7d73fe6SChristoph Hellwig 		default:
1302a7d73fe6SChristoph Hellwig 			WARN_ON_ONCE(1);
1303a7d73fe6SChristoph Hellwig 			error = -EIO;
1304a7d73fe6SChristoph Hellwig 			break;
1305a7d73fe6SChristoph Hellwig 		}
1306a7d73fe6SChristoph Hellwig 
1307a7d73fe6SChristoph Hellwig 		if (error)
130813e451fdSJan Kara 			goto error_finish_iomap;
1309b1aa812bSJan Kara 
1310b1aa812bSJan Kara 		__SetPageUptodate(vmf->cow_page);
1311ab77dab4SSouptick Joarder 		ret = finish_fault(vmf);
1312ab77dab4SSouptick Joarder 		if (!ret)
1313ab77dab4SSouptick Joarder 			ret = VM_FAULT_DONE_COW;
131413e451fdSJan Kara 		goto finish_iomap;
1315a7d73fe6SChristoph Hellwig 	}
1316a7d73fe6SChristoph Hellwig 
1317aaa422c4SDan Williams 	sync = dax_fault_is_synchronous(flags, vma, &iomap);
1318caa51d26SJan Kara 
1319a7d73fe6SChristoph Hellwig 	switch (iomap.type) {
1320a7d73fe6SChristoph Hellwig 	case IOMAP_MAPPED:
1321a7d73fe6SChristoph Hellwig 		if (iomap.flags & IOMAP_F_NEW) {
1322a7d73fe6SChristoph Hellwig 			count_vm_event(PGMAJFAULT);
1323a0987ad5SJan Kara 			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1324a7d73fe6SChristoph Hellwig 			major = VM_FAULT_MAJOR;
1325a7d73fe6SChristoph Hellwig 		}
13261b5a1cb2SJan Kara 		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
13271b5a1cb2SJan Kara 		if (error < 0)
13281b5a1cb2SJan Kara 			goto error_finish_iomap;
13291b5a1cb2SJan Kara 
13303fe0791cSDan Williams 		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1331caa51d26SJan Kara 						 0, write && !sync);
13321b5a1cb2SJan Kara 
1333caa51d26SJan Kara 		/*
1334caa51d26SJan Kara 		 * If we are doing a synchronous page fault and the inode needs
1335caa51d26SJan Kara 		 * fsync, we can insert the PTE into the page tables only after
1336caa51d26SJan Kara 		 * that happens. Skip insertion for now and return the pfn so
1337caa51d26SJan Kara 		 * that the caller can insert it after fsync is done.
1338caa51d26SJan Kara 		 */
1339caa51d26SJan Kara 		if (sync) {
1340caa51d26SJan Kara 			if (WARN_ON_ONCE(!pfnp)) {
1341caa51d26SJan Kara 				error = -EIO;
1342caa51d26SJan Kara 				goto error_finish_iomap;
1343caa51d26SJan Kara 			}
1344caa51d26SJan Kara 			*pfnp = pfn;
1345ab77dab4SSouptick Joarder 			ret = VM_FAULT_NEEDDSYNC | major;
1346caa51d26SJan Kara 			goto finish_iomap;
1347caa51d26SJan Kara 		}
13481b5a1cb2SJan Kara 		trace_dax_insert_mapping(inode, vmf, entry);
13491b5a1cb2SJan Kara 		if (write)
1350ab77dab4SSouptick Joarder 			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
13511b5a1cb2SJan Kara 		else
1352ab77dab4SSouptick Joarder 			ret = vmf_insert_mixed(vma, vaddr, pfn);
13531b5a1cb2SJan Kara 
1354ab77dab4SSouptick Joarder 		goto finish_iomap;
1355a7d73fe6SChristoph Hellwig 	case IOMAP_UNWRITTEN:
1356a7d73fe6SChristoph Hellwig 	case IOMAP_HOLE:
1357d2c43ef1SJan Kara 		if (!write) {
1358ab77dab4SSouptick Joarder 			ret = dax_load_hole(mapping, entry, vmf);
135913e451fdSJan Kara 			goto finish_iomap;
13601550290bSRoss Zwisler 		}
1361a7d73fe6SChristoph Hellwig 		/*FALLTHRU*/
1362a7d73fe6SChristoph Hellwig 	default:
1363a7d73fe6SChristoph Hellwig 		WARN_ON_ONCE(1);
1364a7d73fe6SChristoph Hellwig 		error = -EIO;
1365a7d73fe6SChristoph Hellwig 		break;
1366a7d73fe6SChristoph Hellwig 	}
1367a7d73fe6SChristoph Hellwig 
136813e451fdSJan Kara  error_finish_iomap:
1369ab77dab4SSouptick Joarder 	ret = dax_fault_return(error);
13709f141d6eSJan Kara  finish_iomap:
13719f141d6eSJan Kara 	if (ops->iomap_end) {
13729f141d6eSJan Kara 		int copied = PAGE_SIZE;
13739f141d6eSJan Kara 
1374ab77dab4SSouptick Joarder 		if (ret & VM_FAULT_ERROR)
13759f141d6eSJan Kara 			copied = 0;
13769f141d6eSJan Kara 		/*
13779f141d6eSJan Kara 		 * The fault is done by now and there's no way back (another
13789f141d6eSJan Kara 		 * thread may already be happily using the PTE we have
13799f141d6eSJan Kara 		 * installed). Just ignore any error from ->iomap_end since we
13809f141d6eSJan Kara 		 * cannot do much with it.
13819f141d6eSJan Kara 		 */
13829f141d6eSJan Kara 		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
13831550290bSRoss Zwisler 	}
138413e451fdSJan Kara  unlock_entry:
138591d25ba8SRoss Zwisler 	put_locked_mapping_entry(mapping, vmf->pgoff);
1386a9c42b33SRoss Zwisler  out:
1387ab77dab4SSouptick Joarder 	trace_dax_pte_fault_done(inode, vmf, ret);
1388ab77dab4SSouptick Joarder 	return ret | major;
1389a7d73fe6SChristoph Hellwig }
1390642261acSRoss Zwisler 
1391642261acSRoss Zwisler #ifdef CONFIG_FS_DAX_PMD
1392ab77dab4SSouptick Joarder static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
139391d25ba8SRoss Zwisler 		void *entry)
1394642261acSRoss Zwisler {
1395f4200391SDave Jiang 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1396f4200391SDave Jiang 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1397653b2ea3SRoss Zwisler 	struct inode *inode = mapping->host;
1398642261acSRoss Zwisler 	struct page *zero_page;
1399653b2ea3SRoss Zwisler 	void *ret = NULL;
1400642261acSRoss Zwisler 	spinlock_t *ptl;
1401642261acSRoss Zwisler 	pmd_t pmd_entry;
14023fe0791cSDan Williams 	pfn_t pfn;
1403642261acSRoss Zwisler 
1404f4200391SDave Jiang 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1405642261acSRoss Zwisler 
1406642261acSRoss Zwisler 	if (unlikely(!zero_page))
1407653b2ea3SRoss Zwisler 		goto fallback;
1408642261acSRoss Zwisler 
14093fe0791cSDan Williams 	pfn = page_to_pfn_t(zero_page);
14103fe0791cSDan Williams 	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1411f5b7b748SJan Kara 			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
1412642261acSRoss Zwisler 
1413f4200391SDave Jiang 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1414f4200391SDave Jiang 	if (!pmd_none(*(vmf->pmd))) {
1415642261acSRoss Zwisler 		spin_unlock(ptl);
1416653b2ea3SRoss Zwisler 		goto fallback;
1417642261acSRoss Zwisler 	}
1418642261acSRoss Zwisler 
1419f4200391SDave Jiang 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1420642261acSRoss Zwisler 	pmd_entry = pmd_mkhuge(pmd_entry);
1421f4200391SDave Jiang 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1422642261acSRoss Zwisler 	spin_unlock(ptl);
1423f4200391SDave Jiang 	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1424642261acSRoss Zwisler 	return VM_FAULT_NOPAGE;
1425653b2ea3SRoss Zwisler 
1426653b2ea3SRoss Zwisler fallback:
1427f4200391SDave Jiang 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1428642261acSRoss Zwisler 	return VM_FAULT_FALLBACK;
1429642261acSRoss Zwisler }
1430642261acSRoss Zwisler 
1431ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1432a2d58167SDave Jiang 			       const struct iomap_ops *ops)
1433642261acSRoss Zwisler {
1434f4200391SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
1435642261acSRoss Zwisler 	struct address_space *mapping = vma->vm_file->f_mapping;
1436d8a849e1SDave Jiang 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1437d8a849e1SDave Jiang 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1438caa51d26SJan Kara 	bool sync;
14399484ab1bSJan Kara 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1440642261acSRoss Zwisler 	struct inode *inode = mapping->host;
1441ab77dab4SSouptick Joarder 	vm_fault_t result = VM_FAULT_FALLBACK;
1442642261acSRoss Zwisler 	struct iomap iomap = { 0 };
1443642261acSRoss Zwisler 	pgoff_t max_pgoff, pgoff;
1444642261acSRoss Zwisler 	void *entry;
1445642261acSRoss Zwisler 	loff_t pos;
1446642261acSRoss Zwisler 	int error;
1447302a5e31SJan Kara 	pfn_t pfn;
1448642261acSRoss Zwisler 
1449282a8e03SRoss Zwisler 	/*
1450282a8e03SRoss Zwisler 	 * Check whether the offset is beyond the end of the file now. The
1451282a8e03SRoss Zwisler 	 * caller is supposed to hold locks serializing us with truncate /
1452282a8e03SRoss Zwisler 	 * punch hole, so this is a reliable test.
1453282a8e03SRoss Zwisler 	 */
1454282a8e03SRoss Zwisler 	pgoff = linear_page_index(vma, pmd_addr);
1455957ac8c4SJeff Moyer 	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1456282a8e03SRoss Zwisler 
1457f4200391SDave Jiang 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1458282a8e03SRoss Zwisler 
1459fffa281bSRoss Zwisler 	/*
1460fffa281bSRoss Zwisler 	 * Make sure that the faulting address's PMD offset (color) matches
1461fffa281bSRoss Zwisler 	 * the PMD offset from the start of the file.  This is necessary so
1462fffa281bSRoss Zwisler 	 * that a PMD range in the page table overlaps exactly with a PMD
1463fffa281bSRoss Zwisler 	 * range in the radix tree.
1464fffa281bSRoss Zwisler 	 */
1465fffa281bSRoss Zwisler 	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1466fffa281bSRoss Zwisler 	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1467fffa281bSRoss Zwisler 		goto fallback;
1468fffa281bSRoss Zwisler 
1469642261acSRoss Zwisler 	/* Fall back to PTEs if we're going to COW */
1470642261acSRoss Zwisler 	if (write && !(vma->vm_flags & VM_SHARED))
1471642261acSRoss Zwisler 		goto fallback;
1472642261acSRoss Zwisler 
1473642261acSRoss Zwisler 	/* If the PMD would extend outside the VMA */
1474642261acSRoss Zwisler 	if (pmd_addr < vma->vm_start)
1475642261acSRoss Zwisler 		goto fallback;
1476642261acSRoss Zwisler 	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1477642261acSRoss Zwisler 		goto fallback;
1478642261acSRoss Zwisler 
1479957ac8c4SJeff Moyer 	if (pgoff >= max_pgoff) {
1480282a8e03SRoss Zwisler 		result = VM_FAULT_SIGBUS;
1481282a8e03SRoss Zwisler 		goto out;
1482282a8e03SRoss Zwisler 	}
1483642261acSRoss Zwisler 
1484642261acSRoss Zwisler 	/* If the PMD would extend beyond the file size */
1485957ac8c4SJeff Moyer 	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
1486642261acSRoss Zwisler 		goto fallback;
1487642261acSRoss Zwisler 
1488642261acSRoss Zwisler 	/*
148991d25ba8SRoss Zwisler 	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
149091d25ba8SRoss Zwisler 	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
149191d25ba8SRoss Zwisler 	 * is already in the tree, for instance), it will return -EEXIST and
149291d25ba8SRoss Zwisler 	 * we just fall back to 4k entries.
14939f141d6eSJan Kara 	 */
14949f141d6eSJan Kara 	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
14959f141d6eSJan Kara 	if (IS_ERR(entry))
1496876f2946SRoss Zwisler 		goto fallback;
1497876f2946SRoss Zwisler 
1498876f2946SRoss Zwisler 	/*
1499e2093926SRoss Zwisler 	 * It is possible, particularly with mixed reads & writes to private
1500e2093926SRoss Zwisler 	 * mappings, that we have raced with a PTE fault that overlaps with
1501e2093926SRoss Zwisler 	 * the PMD we need to set up.  If so, just return and the fault will be
1502e2093926SRoss Zwisler 	 * retried.
1503e2093926SRoss Zwisler 	 */
1504e2093926SRoss Zwisler 	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1505e2093926SRoss Zwisler 			!pmd_devmap(*vmf->pmd)) {
1506e2093926SRoss Zwisler 		result = 0;
1507e2093926SRoss Zwisler 		goto unlock_entry;
1508e2093926SRoss Zwisler 	}
1509e2093926SRoss Zwisler 
1510e2093926SRoss Zwisler 	/*
1511876f2946SRoss Zwisler 	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1512876f2946SRoss Zwisler 	 * setting up a mapping, so really we're using iomap_begin() as a way
1513876f2946SRoss Zwisler 	 * to look up our filesystem block.
1514876f2946SRoss Zwisler 	 */
1515876f2946SRoss Zwisler 	pos = (loff_t)pgoff << PAGE_SHIFT;
1516876f2946SRoss Zwisler 	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1517876f2946SRoss Zwisler 	if (error)
1518876f2946SRoss Zwisler 		goto unlock_entry;
1519876f2946SRoss Zwisler 
1520876f2946SRoss Zwisler 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
15219f141d6eSJan Kara 		goto finish_iomap;
15229f141d6eSJan Kara 
1523aaa422c4SDan Williams 	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1524caa51d26SJan Kara 
1525642261acSRoss Zwisler 	switch (iomap.type) {
1526642261acSRoss Zwisler 	case IOMAP_MAPPED:
1527302a5e31SJan Kara 		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1528302a5e31SJan Kara 		if (error < 0)
1529302a5e31SJan Kara 			goto finish_iomap;
1530302a5e31SJan Kara 
15313fe0791cSDan Williams 		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1532caa51d26SJan Kara 						RADIX_DAX_PMD, write && !sync);
1533302a5e31SJan Kara 
1534caa51d26SJan Kara 		/*
1535caa51d26SJan Kara 		 * If we are doing a synchronous page fault and the inode needs
1536caa51d26SJan Kara 		 * fsync, we can insert the PMD into the page tables only after
1537caa51d26SJan Kara 		 * that happens. Skip insertion for now and return the pfn so
1538caa51d26SJan Kara 		 * that the caller can insert it after fsync is done.
1539caa51d26SJan Kara 		 */
1540caa51d26SJan Kara 		if (sync) {
1541caa51d26SJan Kara 			if (WARN_ON_ONCE(!pfnp))
1542caa51d26SJan Kara 				goto finish_iomap;
1543caa51d26SJan Kara 			*pfnp = pfn;
1544caa51d26SJan Kara 			result = VM_FAULT_NEEDDSYNC;
1545caa51d26SJan Kara 			goto finish_iomap;
1546caa51d26SJan Kara 		}
1547caa51d26SJan Kara 
1548302a5e31SJan Kara 		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1549302a5e31SJan Kara 		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
1550302a5e31SJan Kara 					    write);
1551642261acSRoss Zwisler 		break;
1552642261acSRoss Zwisler 	case IOMAP_UNWRITTEN:
1553642261acSRoss Zwisler 	case IOMAP_HOLE:
1554642261acSRoss Zwisler 		if (WARN_ON_ONCE(write))
1555876f2946SRoss Zwisler 			break;
155691d25ba8SRoss Zwisler 		result = dax_pmd_load_hole(vmf, &iomap, entry);
1557642261acSRoss Zwisler 		break;
1558642261acSRoss Zwisler 	default:
1559642261acSRoss Zwisler 		WARN_ON_ONCE(1);
1560642261acSRoss Zwisler 		break;
1561642261acSRoss Zwisler 	}
1562642261acSRoss Zwisler 
15639f141d6eSJan Kara  finish_iomap:
15649f141d6eSJan Kara 	if (ops->iomap_end) {
15659f141d6eSJan Kara 		int copied = PMD_SIZE;
15669f141d6eSJan Kara 
15679f141d6eSJan Kara 		if (result == VM_FAULT_FALLBACK)
15689f141d6eSJan Kara 			copied = 0;
15699f141d6eSJan Kara 		/*
15709f141d6eSJan Kara 		 * The fault is done by now and there's no way back (another
15719f141d6eSJan Kara 		 * thread may already be happily using the PMD we have
15729f141d6eSJan Kara 		 * installed). Just ignore any error from ->iomap_end since we
15739f141d6eSJan Kara 		 * cannot do much with it.
15749f141d6eSJan Kara 		 */
15759f141d6eSJan Kara 		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
15769f141d6eSJan Kara 				&iomap);
15779f141d6eSJan Kara 	}
1578876f2946SRoss Zwisler  unlock_entry:
157991d25ba8SRoss Zwisler 	put_locked_mapping_entry(mapping, pgoff);
1580642261acSRoss Zwisler  fallback:
1581642261acSRoss Zwisler 	if (result == VM_FAULT_FALLBACK) {
1582d8a849e1SDave Jiang 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1583642261acSRoss Zwisler 		count_vm_event(THP_FAULT_FALLBACK);
1584642261acSRoss Zwisler 	}
1585282a8e03SRoss Zwisler out:
1586f4200391SDave Jiang 	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1587642261acSRoss Zwisler 	return result;
1588642261acSRoss Zwisler }
1589a2d58167SDave Jiang #else
1590ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
159101cddfe9SArnd Bergmann 			       const struct iomap_ops *ops)
1592a2d58167SDave Jiang {
1593a2d58167SDave Jiang 	return VM_FAULT_FALLBACK;
1594a2d58167SDave Jiang }
1595642261acSRoss Zwisler #endif /* CONFIG_FS_DAX_PMD */
1596a2d58167SDave Jiang 
1597a2d58167SDave Jiang /**
1598a2d58167SDave Jiang  * dax_iomap_fault - handle a page fault on a DAX file
1599a2d58167SDave Jiang  * @vmf: The description of the fault
1600cec04e8cSJan Kara  * @pe_size: Size of the page to fault in
16019a0dd422SJan Kara  * @pfnp: PFN to insert for synchronous faults if fsync is required
1602c0b24625SJan Kara  * @iomap_errp: Storage for detailed error code in case of error
1603cec04e8cSJan Kara  * @ops: Iomap ops passed from the file system
1604a2d58167SDave Jiang  *
1605a2d58167SDave Jiang  * When a page fault occurs, filesystems may call this helper in
1606a2d58167SDave Jiang  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1607a2d58167SDave Jiang  * has done all the necessary locking for the page fault to proceed
1608a2d58167SDave Jiang  * successfully.
1609a2d58167SDave Jiang  */
1610ab77dab4SSouptick Joarder vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1611c0b24625SJan Kara 		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1612a2d58167SDave Jiang {
1613c791ace1SDave Jiang 	switch (pe_size) {
1614c791ace1SDave Jiang 	case PE_SIZE_PTE:
1615c0b24625SJan Kara 		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1616c791ace1SDave Jiang 	case PE_SIZE_PMD:
16179a0dd422SJan Kara 		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1618a2d58167SDave Jiang 	default:
1619a2d58167SDave Jiang 		return VM_FAULT_FALLBACK;
1620a2d58167SDave Jiang 	}
1621a2d58167SDave Jiang }
1622a2d58167SDave Jiang EXPORT_SYMBOL_GPL(dax_iomap_fault);
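
/*
 * Illustrative sketch, not part of this file: a filesystem's ->fault
 * handler typically wraps dax_iomap_fault() in whatever lock it uses to
 * serialize faults against truncate.  Passing a NULL @pfnp is only safe
 * when the filesystem does not advertise MAP_SYNC support.
 * example_iomap_ops is a placeholder, not a real symbol.
 */
#if 0	/* example only */
static vm_fault_t example_filemap_fault(struct vm_fault *vmf)
{
	vm_fault_t ret;

	/* take the fs lock serializing faults against truncate here */
	ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL,
			&example_iomap_ops);
	/* drop the lock here */
	return ret;
}
#endif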
162371eab6dfSJan Kara 
162471eab6dfSJan Kara /**
162571eab6dfSJan Kara  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
162671eab6dfSJan Kara  * @vmf: The description of the fault
162771eab6dfSJan Kara  * @pe_size: Size of entry to be inserted
162871eab6dfSJan Kara  * @pfn: PFN to insert
162971eab6dfSJan Kara  *
163071eab6dfSJan Kara  * This function inserts a writeable PTE or PMD entry into the page tables
163171eab6dfSJan Kara  * for an mmapped DAX file.  It takes care of marking the corresponding
163271eab6dfSJan Kara  * radix tree entry as dirty as well.
163371eab6dfSJan Kara  */
1634ab77dab4SSouptick Joarder static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
163571eab6dfSJan Kara 				  enum page_entry_size pe_size,
163671eab6dfSJan Kara 				  pfn_t pfn)
163771eab6dfSJan Kara {
163871eab6dfSJan Kara 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
163971eab6dfSJan Kara 	void *entry, **slot;
164071eab6dfSJan Kara 	pgoff_t index = vmf->pgoff;
1641ab77dab4SSouptick Joarder 	vm_fault_t ret;
164271eab6dfSJan Kara 
1643b93b0163SMatthew Wilcox 	xa_lock_irq(&mapping->i_pages);
164471eab6dfSJan Kara 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
164571eab6dfSJan Kara 	/* Did we race with someone splitting entry or so? */
164671eab6dfSJan Kara 	if (!entry ||
164771eab6dfSJan Kara 	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
164871eab6dfSJan Kara 	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
164971eab6dfSJan Kara 		put_unlocked_mapping_entry(mapping, index, entry);
1650b93b0163SMatthew Wilcox 		xa_unlock_irq(&mapping->i_pages);
165171eab6dfSJan Kara 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
165271eab6dfSJan Kara 						      VM_FAULT_NOPAGE);
165371eab6dfSJan Kara 		return VM_FAULT_NOPAGE;
165471eab6dfSJan Kara 	}
1655b93b0163SMatthew Wilcox 	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
165671eab6dfSJan Kara 	entry = lock_slot(mapping, slot);
1657b93b0163SMatthew Wilcox 	xa_unlock_irq(&mapping->i_pages);
165871eab6dfSJan Kara 	switch (pe_size) {
165971eab6dfSJan Kara 	case PE_SIZE_PTE:
1660ab77dab4SSouptick Joarder 		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
166171eab6dfSJan Kara 		break;
166271eab6dfSJan Kara #ifdef CONFIG_FS_DAX_PMD
166371eab6dfSJan Kara 	case PE_SIZE_PMD:
1664ab77dab4SSouptick Joarder 		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
166571eab6dfSJan Kara 			pfn, true);
166671eab6dfSJan Kara 		break;
166771eab6dfSJan Kara #endif
166871eab6dfSJan Kara 	default:
1669ab77dab4SSouptick Joarder 		ret = VM_FAULT_FALLBACK;
167071eab6dfSJan Kara 	}
167171eab6dfSJan Kara 	put_locked_mapping_entry(mapping, index);
1672ab77dab4SSouptick Joarder 	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1673ab77dab4SSouptick Joarder 	return ret;
167471eab6dfSJan Kara }
167571eab6dfSJan Kara 
167671eab6dfSJan Kara /**
167771eab6dfSJan Kara  * dax_finish_sync_fault - finish synchronous page fault
167871eab6dfSJan Kara  * @vmf: The description of the fault
167971eab6dfSJan Kara  * @pe_size: Size of entry to be inserted
168071eab6dfSJan Kara  * @pfn: PFN to insert
168171eab6dfSJan Kara  *
168271eab6dfSJan Kara  * This function ensures that the file range touched by the page fault is
168371eab6dfSJan Kara  * stored persistently on the media and handles insertion of the appropriate
168471eab6dfSJan Kara  * page table entry.
168571eab6dfSJan Kara  */
1686ab77dab4SSouptick Joarder vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1687ab77dab4SSouptick Joarder 		enum page_entry_size pe_size, pfn_t pfn)
168871eab6dfSJan Kara {
168971eab6dfSJan Kara 	int err;
169071eab6dfSJan Kara 	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
169171eab6dfSJan Kara 	size_t len = 0;
169271eab6dfSJan Kara 
169371eab6dfSJan Kara 	if (pe_size == PE_SIZE_PTE)
169471eab6dfSJan Kara 		len = PAGE_SIZE;
169571eab6dfSJan Kara 	else if (pe_size == PE_SIZE_PMD)
169671eab6dfSJan Kara 		len = PMD_SIZE;
169771eab6dfSJan Kara 	else
169871eab6dfSJan Kara 		WARN_ON_ONCE(1);
169971eab6dfSJan Kara 	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
170071eab6dfSJan Kara 	if (err)
170171eab6dfSJan Kara 		return VM_FAULT_SIGBUS;
170271eab6dfSJan Kara 	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
170371eab6dfSJan Kara }
170471eab6dfSJan Kara EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
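
/*
 * Illustrative sketch, not part of this file: a MAP_SYNC-aware
 * ->huge_fault handler passes a pfn slot to dax_iomap_fault() and, on
 * VM_FAULT_NEEDDSYNC, persists the metadata and completes the fault via
 * dax_finish_sync_fault().  example_iomap_ops is a placeholder, not a
 * real symbol.
 */
#if 0	/* example only */
static vm_fault_t example_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	pfn_t pfn;
	vm_fault_t ret;

	/* take the fs lock serializing faults against truncate here */
	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	/* drop the lock here */
	return ret;
}
#endif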
1705