1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * fs/dax.c - Direct Access filesystem code
4  * Copyright (c) 2013-2014 Intel Corporation
5  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
6  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
7  */
8 
9 #include <linux/atomic.h>
10 #include <linux/blkdev.h>
11 #include <linux/buffer_head.h>
12 #include <linux/dax.h>
13 #include <linux/fs.h>
14 #include <linux/genhd.h>
15 #include <linux/highmem.h>
16 #include <linux/memcontrol.h>
17 #include <linux/mm.h>
18 #include <linux/mutex.h>
19 #include <linux/pagevec.h>
20 #include <linux/sched.h>
21 #include <linux/sched/signal.h>
22 #include <linux/uio.h>
23 #include <linux/vmstat.h>
24 #include <linux/pfn_t.h>
25 #include <linux/sizes.h>
26 #include <linux/mmu_notifier.h>
27 #include <linux/iomap.h>
28 #include <asm/pgalloc.h>
29 
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/fs_dax.h>
32 
33 static inline unsigned int pe_order(enum page_entry_size pe_size)
34 {
35 	if (pe_size == PE_SIZE_PTE)
36 		return PAGE_SHIFT - PAGE_SHIFT;
37 	if (pe_size == PE_SIZE_PMD)
38 		return PMD_SHIFT - PAGE_SHIFT;
39 	if (pe_size == PE_SIZE_PUD)
40 		return PUD_SHIFT - PAGE_SHIFT;
41 	return ~0;
42 }
43 
44 /* We choose 4096 entries - same as per-zone page wait tables */
45 #define DAX_WAIT_TABLE_BITS 12
46 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
47 
48 /* The 'colour' (ie low bits) within a PMD of a page offset.  */
49 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
50 #define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
51 
52 /* The order of a PMD entry */
53 #define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
54 
55 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
56 
57 static int __init init_dax_wait_table(void)
58 {
59 	int i;
60 
61 	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
62 		init_waitqueue_head(wait_table + i);
63 	return 0;
64 }
65 fs_initcall(init_dax_wait_table);
66 
67 /*
68  * DAX pagecache entries use XArray value entries so they can't be mistaken
69  * for pages.  We use one bit for locking, one bit for the entry size (PMD)
70  * and two more to tell us if the entry is a zero page or an empty entry that
71  * is just used for locking.  In total four special bits.
72  *
73  * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
74  * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
75  * block allocation.
76  */
77 #define DAX_SHIFT	(4)
78 #define DAX_LOCKED	(1UL << 0)
79 #define DAX_PMD		(1UL << 1)
80 #define DAX_ZERO_PAGE	(1UL << 2)
81 #define DAX_EMPTY	(1UL << 3)
82 
83 static unsigned long dax_to_pfn(void *entry)
84 {
85 	return xa_to_value(entry) >> DAX_SHIFT;
86 }
87 
88 static void *dax_make_entry(pfn_t pfn, unsigned long flags)
89 {
90 	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
91 }
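
/*
 * Illustrative sketch, not called anywhere: how a pfn and the flag bits
 * above pack into a single xarray value.  An empty PTE-sized entry for
 * pfn 0x1234 is stored as (DAX_EMPTY | 0x1234 << DAX_SHIFT); locking it
 * merely sets bit 0 in place, so no separate lock object is needed and
 * the pfn read back through dax_to_pfn() is unaffected.
 */
static __maybe_unused void dax_entry_encoding_example(void)
{
	void *entry = dax_make_entry(pfn_to_pfn_t(0x1234), DAX_EMPTY);
	void *locked = xa_mk_value(xa_to_value(entry) | DAX_LOCKED);

	WARN_ON(dax_to_pfn(locked) != 0x1234);
}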
92 
93 static bool dax_is_locked(void *entry)
94 {
95 	return xa_to_value(entry) & DAX_LOCKED;
96 }
97 
98 static unsigned int dax_entry_order(void *entry)
99 {
100 	if (xa_to_value(entry) & DAX_PMD)
101 		return PMD_ORDER;
102 	return 0;
103 }
104 
105 static unsigned long dax_is_pmd_entry(void *entry)
106 {
107 	return xa_to_value(entry) & DAX_PMD;
108 }
109 
110 static bool dax_is_pte_entry(void *entry)
111 {
112 	return !(xa_to_value(entry) & DAX_PMD);
113 }
114 
115 static int dax_is_zero_entry(void *entry)
116 {
117 	return xa_to_value(entry) & DAX_ZERO_PAGE;
118 }
119 
120 static int dax_is_empty_entry(void *entry)
121 {
122 	return xa_to_value(entry) & DAX_EMPTY;
123 }
124 
125 /*
126  * true if the entry that was found is of a smaller order than the entry
127  * we were looking for
128  */
129 static bool dax_is_conflict(void *entry)
130 {
131 	return entry == XA_RETRY_ENTRY;
132 }
133 
134 /*
135  * DAX page cache entry locking
136  */
137 struct exceptional_entry_key {
138 	struct xarray *xa;
139 	pgoff_t entry_start;
140 };
141 
142 struct wait_exceptional_entry_queue {
143 	wait_queue_entry_t wait;
144 	struct exceptional_entry_key key;
145 };
146 
147 /**
148  * enum dax_wake_mode - waitqueue wakeup behaviour
149  * @WAKE_ALL: wake all waiters in the waitqueue
150  * @WAKE_NEXT: wake only the first waiter in the waitqueue
151  */
152 enum dax_wake_mode {
153 	WAKE_ALL,
154 	WAKE_NEXT,
155 };
156 
157 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
158 		void *entry, struct exceptional_entry_key *key)
159 {
160 	unsigned long hash;
161 	unsigned long index = xas->xa_index;
162 
163 	/*
164 	 * If 'entry' is a PMD, align the 'index' that we use for the wait
165 	 * queue to the start of that PMD.  This ensures that all offsets in
166 	 * the range covered by the PMD map to the same bit lock.
167 	 */
168 	if (dax_is_pmd_entry(entry))
169 		index &= ~PG_PMD_COLOUR;
170 	key->xa = xas->xa;
171 	key->entry_start = index;
172 
173 	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
174 	return wait_table + hash;
175 }
176 
177 static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
178 		unsigned int mode, int sync, void *keyp)
179 {
180 	struct exceptional_entry_key *key = keyp;
181 	struct wait_exceptional_entry_queue *ewait =
182 		container_of(wait, struct wait_exceptional_entry_queue, wait);
183 
184 	if (key->xa != ewait->key.xa ||
185 	    key->entry_start != ewait->key.entry_start)
186 		return 0;
187 	return autoremove_wake_function(wait, mode, sync, NULL);
188 }
189 
190 /*
191  * @entry may no longer be the entry at the index in the mapping.
192  * The important information it's conveying is whether the entry at
193  * this index used to be a PMD entry.
194  */
195 static void dax_wake_entry(struct xa_state *xas, void *entry,
196 			   enum dax_wake_mode mode)
197 {
198 	struct exceptional_entry_key key;
199 	wait_queue_head_t *wq;
200 
201 	wq = dax_entry_waitqueue(xas, entry, &key);
202 
203 	/*
204 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
205 	 * under the i_pages lock, ditto for entry handling in our callers.
206 	 * So at this point all tasks that could have seen our entry locked
207 	 * must be in the waitqueue and the following check will see them.
208 	 */
209 	if (waitqueue_active(wq))
210 		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
211 }
212 
213 /*
214  * Look up entry in page cache, wait for it to become unlocked if it
215  * is a DAX entry and return it.  The caller must subsequently call
216  * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
217  * if it did.  The entry returned may have a larger order than @order.
218  * If @order is larger than the order of the entry found in i_pages, this
219  * function returns a dax_is_conflict entry.
220  *
221  * Must be called with the i_pages lock held.
222  */
223 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
224 {
225 	void *entry;
226 	struct wait_exceptional_entry_queue ewait;
227 	wait_queue_head_t *wq;
228 
229 	init_wait(&ewait.wait);
230 	ewait.wait.func = wake_exceptional_entry_func;
231 
232 	for (;;) {
233 		entry = xas_find_conflict(xas);
234 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
235 			return entry;
236 		if (dax_entry_order(entry) < order)
237 			return XA_RETRY_ENTRY;
238 		if (!dax_is_locked(entry))
239 			return entry;
240 
241 		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
242 		prepare_to_wait_exclusive(wq, &ewait.wait,
243 					  TASK_UNINTERRUPTIBLE);
244 		xas_unlock_irq(xas);
245 		xas_reset(xas);
246 		schedule();
247 		finish_wait(wq, &ewait.wait);
248 		xas_lock_irq(xas);
249 	}
250 }
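
/*
 * Typical calling pattern, shown as a sketch only (the real callers
 * below add error handling and entry validation):
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas, 0);
 *	... inspect or replace the entry under the i_pages lock ...
 *	put_unlocked_entry(&xas, entry, WAKE_NEXT);
 *	xas_unlock_irq(&xas);
 *
 * If the caller locked the entry with dax_lock_entry() instead, it must
 * release it with dax_unlock_entry() after dropping the xa_lock.
 */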
251 
252 /*
253  * The only thing keeping the address space around is the i_pages lock
254  * (it's cycled in clear_inode() after removing the entries from i_pages).
255  * After we call xas_unlock_irq(), we cannot touch xas->xa.
256  */
257 static void wait_entry_unlocked(struct xa_state *xas, void *entry)
258 {
259 	struct wait_exceptional_entry_queue ewait;
260 	wait_queue_head_t *wq;
261 
262 	init_wait(&ewait.wait);
263 	ewait.wait.func = wake_exceptional_entry_func;
264 
265 	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
266 	/*
267 	 * Unlike get_unlocked_entry() there is no guarantee that this
268 	 * path ever successfully retrieves an unlocked entry before an
269 	 * inode dies. Perform a non-exclusive wait in case this path
270 	 * never successfully performs its own wake up.
271 	 */
272 	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
273 	xas_unlock_irq(xas);
274 	schedule();
275 	finish_wait(wq, &ewait.wait);
276 }
277 
278 static void put_unlocked_entry(struct xa_state *xas, void *entry,
279 			       enum dax_wake_mode mode)
280 {
281 	if (entry && !dax_is_conflict(entry))
282 		dax_wake_entry(xas, entry, mode);
283 }
284 
285 /*
286  * We used the xa_state to get the entry, but then we locked the entry and
287  * dropped the xa_lock, so we know the xa_state is stale and must be reset
288  * before use.
289  */
290 static void dax_unlock_entry(struct xa_state *xas, void *entry)
291 {
292 	void *old;
293 
294 	BUG_ON(dax_is_locked(entry));
295 	xas_reset(xas);
296 	xas_lock_irq(xas);
297 	old = xas_store(xas, entry);
298 	xas_unlock_irq(xas);
299 	BUG_ON(!dax_is_locked(old));
300 	dax_wake_entry(xas, entry, WAKE_NEXT);
301 }
302 
303 /*
304  * Return: The entry stored at this location before it was locked.
305  */
306 static void *dax_lock_entry(struct xa_state *xas, void *entry)
307 {
308 	unsigned long v = xa_to_value(entry);
309 	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
310 }
311 
312 static unsigned long dax_entry_size(void *entry)
313 {
314 	if (dax_is_zero_entry(entry))
315 		return 0;
316 	else if (dax_is_empty_entry(entry))
317 		return 0;
318 	else if (dax_is_pmd_entry(entry))
319 		return PMD_SIZE;
320 	else
321 		return PAGE_SIZE;
322 }
323 
324 static unsigned long dax_end_pfn(void *entry)
325 {
326 	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
327 }
328 
329 /*
330  * Iterate through all mapped pfns represented by an entry, i.e. skip
331  * 'empty' and 'zero' entries.
332  */
333 #define for_each_mapped_pfn(entry, pfn) \
334 	for (pfn = dax_to_pfn(entry); \
335 			pfn < dax_end_pfn(entry); pfn++)
336 
337 /*
338  * TODO: for reflink+dax we need a way to associate a single page with
339  * multiple address_space instances at different linear_page_index()
340  * offsets.
341  */
342 static void dax_associate_entry(void *entry, struct address_space *mapping,
343 		struct vm_area_struct *vma, unsigned long address)
344 {
345 	unsigned long size = dax_entry_size(entry), pfn, index;
346 	int i = 0;
347 
348 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
349 		return;
350 
351 	index = linear_page_index(vma, address & ~(size - 1));
352 	for_each_mapped_pfn(entry, pfn) {
353 		struct page *page = pfn_to_page(pfn);
354 
355 		WARN_ON_ONCE(page->mapping);
356 		page->mapping = mapping;
357 		page->index = index + i++;
358 	}
359 }
360 
361 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
362 		bool trunc)
363 {
364 	unsigned long pfn;
365 
366 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
367 		return;
368 
369 	for_each_mapped_pfn(entry, pfn) {
370 		struct page *page = pfn_to_page(pfn);
371 
372 		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
373 		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
374 		page->mapping = NULL;
375 		page->index = 0;
376 	}
377 }
378 
379 static struct page *dax_busy_page(void *entry)
380 {
381 	unsigned long pfn;
382 
383 	for_each_mapped_pfn(entry, pfn) {
384 		struct page *page = pfn_to_page(pfn);
385 
386 		if (page_ref_count(page) > 1)
387 			return page;
388 	}
389 	return NULL;
390 }
391 
392 /*
393  * dax_lock_page - Lock the DAX entry corresponding to a page
394  * @page: The page whose entry we want to lock
395  *
396  * Context: Process context.
397  * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
398  * not be locked.
399  */
400 dax_entry_t dax_lock_page(struct page *page)
401 {
402 	XA_STATE(xas, NULL, 0);
403 	void *entry;
404 
405 	/* Ensure page->mapping isn't freed while we look at it */
406 	rcu_read_lock();
407 	for (;;) {
408 		struct address_space *mapping = READ_ONCE(page->mapping);
409 
410 		entry = NULL;
411 		if (!mapping || !dax_mapping(mapping))
412 			break;
413 
414 		/*
415 		 * In the device-dax case there's no need to lock, a
416 		 * struct dev_pagemap pin is sufficient to keep the
417 		 * inode alive, and we assume we have dev_pagemap pin
418 		 * otherwise we would not have a valid pfn_to_page()
419 		 * translation.
420 		 */
421 		entry = (void *)~0UL;
422 		if (S_ISCHR(mapping->host->i_mode))
423 			break;
424 
425 		xas.xa = &mapping->i_pages;
426 		xas_lock_irq(&xas);
427 		if (mapping != page->mapping) {
428 			xas_unlock_irq(&xas);
429 			continue;
430 		}
431 		xas_set(&xas, page->index);
432 		entry = xas_load(&xas);
433 		if (dax_is_locked(entry)) {
434 			rcu_read_unlock();
435 			wait_entry_unlocked(&xas, entry);
436 			rcu_read_lock();
437 			continue;
438 		}
439 		dax_lock_entry(&xas, entry);
440 		xas_unlock_irq(&xas);
441 		break;
442 	}
443 	rcu_read_unlock();
444 	return (dax_entry_t)entry;
445 }
446 
447 void dax_unlock_page(struct page *page, dax_entry_t cookie)
448 {
449 	struct address_space *mapping = page->mapping;
450 	XA_STATE(xas, &mapping->i_pages, page->index);
451 
452 	if (S_ISCHR(mapping->host->i_mode))
453 		return;
454 
455 	dax_unlock_entry(&xas, (void *)cookie);
456 }
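
/*
 * Sketch of the intended lock/unlock pairing around these two helpers;
 * the memory-failure path is the in-tree user:
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *	if (!cookie)
 *		return;		(the entry could not be locked)
 *	... handle the poisoned page while the entry is held ...
 *	dax_unlock_page(page, cookie);
 */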
457 
458 /*
459  * Find page cache entry at given index. If it is a DAX entry, return it
460  * with the entry locked. If the page cache doesn't contain an entry at
461  * that index, add a locked empty entry.
462  *
463  * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
464  * either return that locked entry or will return VM_FAULT_FALLBACK.
465  * This will happen if there are any PTE entries within the PMD range
466  * that we are requesting.
467  *
468  * We always favor PTE entries over PMD entries. There isn't a flow where we
469  * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
470  * insertion will fail if it finds any PTE entries already in the tree, and a
471  * PTE insertion will cause an existing PMD entry to be unmapped and
472  * downgraded to PTE entries.  This happens for both PMD zero pages as
473  * well as PMD empty entries.
474  *
475  * The exception to this downgrade path is for PMD entries that have
476  * real storage backing them.  We will leave these real PMD entries in
477  * the tree, and PTE writes will simply dirty the entire PMD entry.
478  *
479  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
480  * persistent memory the benefit is doubtful. We can add that later if we can
481  * show it helps.
482  *
483  * On error, this function does not return an ERR_PTR.  Instead it returns
484  * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
485  * overlap with xarray value entries.
486  */
487 static void *grab_mapping_entry(struct xa_state *xas,
488 		struct address_space *mapping, unsigned int order)
489 {
490 	unsigned long index = xas->xa_index;
491 	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
492 	void *entry;
493 
494 retry:
495 	pmd_downgrade = false;
496 	xas_lock_irq(xas);
497 	entry = get_unlocked_entry(xas, order);
498 
499 	if (entry) {
500 		if (dax_is_conflict(entry))
501 			goto fallback;
502 		if (!xa_is_value(entry)) {
503 			xas_set_err(xas, -EIO);
504 			goto out_unlock;
505 		}
506 
507 		if (order == 0) {
508 			if (dax_is_pmd_entry(entry) &&
509 			    (dax_is_zero_entry(entry) ||
510 			     dax_is_empty_entry(entry))) {
511 				pmd_downgrade = true;
512 			}
513 		}
514 	}
515 
516 	if (pmd_downgrade) {
517 		/*
518 		 * Make sure 'entry' remains valid while we drop
519 		 * the i_pages lock.
520 		 */
521 		dax_lock_entry(xas, entry);
522 
523 		/*
524 		 * Besides huge zero pages, the only other things that get
525 		 * downgraded are empty entries, which don't need to be
526 		 * unmapped.
527 		 */
528 		if (dax_is_zero_entry(entry)) {
529 			xas_unlock_irq(xas);
530 			unmap_mapping_pages(mapping,
531 					xas->xa_index & ~PG_PMD_COLOUR,
532 					PG_PMD_NR, false);
533 			xas_reset(xas);
534 			xas_lock_irq(xas);
535 		}
536 
537 		dax_disassociate_entry(entry, mapping, false);
538 		xas_store(xas, NULL);	/* undo the PMD join */
539 		dax_wake_entry(xas, entry, WAKE_ALL);
540 		mapping->nrpages -= PG_PMD_NR;
541 		entry = NULL;
542 		xas_set(xas, index);
543 	}
544 
545 	if (entry) {
546 		dax_lock_entry(xas, entry);
547 	} else {
548 		unsigned long flags = DAX_EMPTY;
549 
550 		if (order > 0)
551 			flags |= DAX_PMD;
552 		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
553 		dax_lock_entry(xas, entry);
554 		if (xas_error(xas))
555 			goto out_unlock;
556 		mapping->nrpages += 1UL << order;
557 	}
558 
559 out_unlock:
560 	xas_unlock_irq(xas);
561 	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
562 		goto retry;
563 	if (xas->xa_node == XA_ERROR(-ENOMEM))
564 		return xa_mk_internal(VM_FAULT_OOM);
565 	if (xas_error(xas))
566 		return xa_mk_internal(VM_FAULT_SIGBUS);
567 	return entry;
568 fallback:
569 	xas_unlock_irq(xas);
570 	return xa_mk_internal(VM_FAULT_FALLBACK);
571 }
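
/*
 * Decoding the return value, as the fault handlers below do (sketch):
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);
 *	if (xa_is_internal(entry))
 *		return xa_to_internal(entry);	(a VM_FAULT_* code)
 *	... fault handling with the locked entry ...
 *	dax_unlock_entry(&xas, entry);
 */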
572 
573 /**
574  * dax_layout_busy_page_range - find first pinned page in @mapping
575  * @mapping: address space to scan for a page with ref count > 1
576  * @start: Starting offset. Page containing 'start' is included.
577  * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
578  *       pages from 'start' till the end of file are included.
579  *
580  * DAX requires ZONE_DEVICE mapped pages. These pages are never
581  * 'onlined' to the page allocator so they are considered idle when
582  * page->count == 1. A filesystem uses this interface to determine if
583  * any page in the mapping is busy, i.e. for DMA, or other
584  * get_user_pages() usages.
585  *
586  * It is expected that the filesystem is holding locks to block the
587  * establishment of new mappings in this address_space. I.e. it expects
588  * to be able to run unmap_mapping_range() and subsequently not race
589  * mapping_mapped() becoming true.
590  */
591 struct page *dax_layout_busy_page_range(struct address_space *mapping,
592 					loff_t start, loff_t end)
593 {
594 	void *entry;
595 	unsigned int scanned = 0;
596 	struct page *page = NULL;
597 	pgoff_t start_idx = start >> PAGE_SHIFT;
598 	pgoff_t end_idx;
599 	XA_STATE(xas, &mapping->i_pages, start_idx);
600 
601 	/*
602 	 * In the 'limited' case get_user_pages() for dax is disabled.
603 	 */
604 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
605 		return NULL;
606 
607 	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
608 		return NULL;
609 
610 	/* If end == LLONG_MAX, include all pages from start till the end of file */
611 	if (end == LLONG_MAX)
612 		end_idx = ULONG_MAX;
613 	else
614 		end_idx = end >> PAGE_SHIFT;
615 	/*
616 	 * If we race get_user_pages_fast() here either we'll see the
617 	 * elevated page count in the iteration and wait, or
618 	 * get_user_pages_fast() will see that the page it took a reference
619 	 * against is no longer mapped in the page tables and bail to the
620 	 * get_user_pages() slow path.  The slow path is protected by
621 	 * pte_lock() and pmd_lock(). New references are not taken without
622 	 * holding those locks, and unmap_mapping_pages() will not zero the
623 	 * pte or pmd without holding the respective lock, so we are
624 	 * guaranteed to either see new references or prevent new
625 	 * references from being established.
626 	 */
627 	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);
628 
629 	xas_lock_irq(&xas);
630 	xas_for_each(&xas, entry, end_idx) {
631 		if (WARN_ON_ONCE(!xa_is_value(entry)))
632 			continue;
633 		if (unlikely(dax_is_locked(entry)))
634 			entry = get_unlocked_entry(&xas, 0);
635 		if (entry)
636 			page = dax_busy_page(entry);
637 		put_unlocked_entry(&xas, entry, WAKE_NEXT);
638 		if (page)
639 			break;
640 		if (++scanned % XA_CHECK_SCHED)
641 			continue;
642 
643 		xas_pause(&xas);
644 		xas_unlock_irq(&xas);
645 		cond_resched();
646 		xas_lock_irq(&xas);
647 	}
648 	xas_unlock_irq(&xas);
649 	return page;
650 }
651 EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
652 
653 struct page *dax_layout_busy_page(struct address_space *mapping)
654 {
655 	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
656 }
657 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
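
/*
 * Sketch of how a filesystem drains busy pages before a truncate or
 * hole punch; modelled loosely on the XFS caller, with the wait
 * primitive left abstract:
 *
 *	while ((page = dax_layout_busy_page(inode->i_mapping))) {
 *		... wait for page_ref_count(page) to drop to 1 ...
 *	}
 */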
658 
659 static int __dax_invalidate_entry(struct address_space *mapping,
660 					  pgoff_t index, bool trunc)
661 {
662 	XA_STATE(xas, &mapping->i_pages, index);
663 	int ret = 0;
664 	void *entry;
665 
666 	xas_lock_irq(&xas);
667 	entry = get_unlocked_entry(&xas, 0);
668 	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
669 		goto out;
670 	if (!trunc &&
671 	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
672 	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
673 		goto out;
674 	dax_disassociate_entry(entry, mapping, trunc);
675 	xas_store(&xas, NULL);
676 	mapping->nrpages -= 1UL << dax_entry_order(entry);
677 	ret = 1;
678 out:
679 	put_unlocked_entry(&xas, entry, WAKE_ALL);
680 	xas_unlock_irq(&xas);
681 	return ret;
682 }
683 
684 /*
685  * Delete DAX entry at @index from @mapping.  Wait for it
686  * to be unlocked before deleting it.
687  */
688 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
689 {
690 	int ret = __dax_invalidate_entry(mapping, index, true);
691 
692 	/*
693 	 * This gets called from the truncate / punch_hole path. As such, the caller
694 	 * must hold locks protecting against concurrent modifications of the
695 	 * page cache (usually fs-private i_mmap_sem for writing). Since the
696 	 * caller has seen a DAX entry for this index, we better find it
697 	 * at that index as well...
698 	 */
699 	WARN_ON_ONCE(!ret);
700 	return ret;
701 }
702 
703 /*
704  * Invalidate DAX entry if it is clean.
705  */
706 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
707 				      pgoff_t index)
708 {
709 	return __dax_invalidate_entry(mapping, index, false);
710 }
711 
712 static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
713 {
714 	phys_addr_t paddr = iomap->addr + (pos & PAGE_MASK) - iomap->offset;
715 
716 	if (iomap->bdev)
717 		paddr += (get_start_sect(iomap->bdev) << SECTOR_SHIFT);
718 	return PHYS_PFN(paddr);
719 }
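
/*
 * Worked example for the calculation above: with iomap->addr == 0x10000000,
 * iomap->offset == 0x100000 and pos == 0x101234, the page-aligned position
 * 0x101000 maps to physical address 0x10001000, so the returned pgoff is
 * PHYS_PFN(0x10001000), shifted further by the partition start if the iomap
 * carries a block device.
 */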
720 
721 static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
722 {
723 	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
724 	void *vto, *kaddr;
725 	long rc;
726 	int id;
727 
728 	id = dax_read_lock();
729 	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
730 	if (rc < 0) {
731 		dax_read_unlock(id);
732 		return rc;
733 	}
734 	vto = kmap_atomic(vmf->cow_page);
735 	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
736 	kunmap_atomic(vto);
737 	dax_read_unlock(id);
738 	return 0;
739 }
740 
741 /*
742  * By this point grab_mapping_entry() has ensured that we have a locked entry
743  * of the appropriate size so we don't have to worry about downgrading PMDs to
744  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
745  * already in the tree, we will skip the insertion and just dirty the PMD as
746  * appropriate.
747  */
748 static void *dax_insert_entry(struct xa_state *xas,
749 		struct address_space *mapping, struct vm_fault *vmf,
750 		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
751 {
752 	void *new_entry = dax_make_entry(pfn, flags);
753 
754 	if (dirty)
755 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
756 
757 	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
758 		unsigned long index = xas->xa_index;
759 		/* we are replacing a zero page with block mapping */
760 		if (dax_is_pmd_entry(entry))
761 			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
762 					PG_PMD_NR, false);
763 		else /* pte entry */
764 			unmap_mapping_pages(mapping, index, 1, false);
765 	}
766 
767 	xas_reset(xas);
768 	xas_lock_irq(xas);
769 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
770 		void *old;
771 
772 		dax_disassociate_entry(entry, mapping, false);
773 		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
774 		/*
775 		 * Only swap our new entry into the page cache if the current
776 		 * entry is a zero page or an empty entry.  If a normal PTE or
777 		 * PMD entry is already in the cache, we leave it alone.  This
778 		 * means that if we are trying to insert a PTE and the
779 		 * existing entry is a PMD, we will just leave the PMD in the
780 		 * tree and dirty it if necessary.
781 		 */
782 		old = dax_lock_entry(xas, new_entry);
783 		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
784 					DAX_LOCKED));
785 		entry = new_entry;
786 	} else {
787 		xas_load(xas);	/* Walk the xa_state */
788 	}
789 
790 	if (dirty)
791 		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
792 
793 	xas_unlock_irq(xas);
794 	return entry;
795 }
796 
797 static inline
798 unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
799 {
800 	unsigned long address;
801 
802 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
803 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
804 	return address;
805 }
806 
807 /* Walk all mappings of a given index of a file and writeprotect them */
808 static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
809 		unsigned long pfn)
810 {
811 	struct vm_area_struct *vma;
812 	pte_t pte, *ptep = NULL;
813 	pmd_t *pmdp = NULL;
814 	spinlock_t *ptl;
815 
816 	i_mmap_lock_read(mapping);
817 	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
818 		struct mmu_notifier_range range;
819 		unsigned long address;
820 
821 		cond_resched();
822 
823 		if (!(vma->vm_flags & VM_SHARED))
824 			continue;
825 
826 		address = pgoff_address(index, vma);
827 
828 		/*
829 		 * follow_invalidate_pte() will use the range to call
830 		 * mmu_notifier_invalidate_range_start() on our behalf before
831 		 * taking any lock.
832 		 */
833 		if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
834 					  &pmdp, &ptl))
835 			continue;
836 
837 		/*
838 		 * No need to call mmu_notifier_invalidate_range() as we are
839 		 * downgrading page table protection not changing it to point
840 		 * to a new page.
841 		 *
842 		 * See Documentation/vm/mmu_notifier.rst
843 		 */
844 		if (pmdp) {
845 #ifdef CONFIG_FS_DAX_PMD
846 			pmd_t pmd;
847 
848 			if (pfn != pmd_pfn(*pmdp))
849 				goto unlock_pmd;
850 			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
851 				goto unlock_pmd;
852 
853 			flush_cache_page(vma, address, pfn);
854 			pmd = pmdp_invalidate(vma, address, pmdp);
855 			pmd = pmd_wrprotect(pmd);
856 			pmd = pmd_mkclean(pmd);
857 			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
858 unlock_pmd:
859 #endif
860 			spin_unlock(ptl);
861 		} else {
862 			if (pfn != pte_pfn(*ptep))
863 				goto unlock_pte;
864 			if (!pte_dirty(*ptep) && !pte_write(*ptep))
865 				goto unlock_pte;
866 
867 			flush_cache_page(vma, address, pfn);
868 			pte = ptep_clear_flush(vma, address, ptep);
869 			pte = pte_wrprotect(pte);
870 			pte = pte_mkclean(pte);
871 			set_pte_at(vma->vm_mm, address, ptep, pte);
872 unlock_pte:
873 			pte_unmap_unlock(ptep, ptl);
874 		}
875 
876 		mmu_notifier_invalidate_range_end(&range);
877 	}
878 	i_mmap_unlock_read(mapping);
879 }
880 
881 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
882 		struct address_space *mapping, void *entry)
883 {
884 	unsigned long pfn, index, count;
885 	long ret = 0;
886 
887 	/*
888 	 * A page got tagged dirty in DAX mapping? Something is seriously
889 	 * wrong.
890 	 */
891 	if (WARN_ON(!xa_is_value(entry)))
892 		return -EIO;
893 
894 	if (unlikely(dax_is_locked(entry))) {
895 		void *old_entry = entry;
896 
897 		entry = get_unlocked_entry(xas, 0);
898 
899 		/* Entry got punched out / reallocated? */
900 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
901 			goto put_unlocked;
902 		/*
903 		 * Entry got reallocated elsewhere? No need to write it back.
904 		 * We have to compare pfns as we must not bail out due to a
905 		 * difference in lockbit or entry type.
906 		 */
907 		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
908 			goto put_unlocked;
909 		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
910 					dax_is_zero_entry(entry))) {
911 			ret = -EIO;
912 			goto put_unlocked;
913 		}
914 
915 		/* Another fsync thread may have already done this entry */
916 		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
917 			goto put_unlocked;
918 	}
919 
920 	/* Lock the entry to serialize with page faults */
921 	dax_lock_entry(xas, entry);
922 
923 	/*
924 	 * We can clear the tag now but we have to be careful so that concurrent
925 	 * dax_writeback_one() calls for the same index cannot finish before we
926 	 * actually flush the caches. This is achieved as the calls will look
927 	 * at the entry only under the i_pages lock and once they do that
928 	 * they will see the entry locked and wait for it to unlock.
929 	 */
930 	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
931 	xas_unlock_irq(xas);
932 
933 	/*
934 	 * If dax_writeback_mapping_range() was given a wbc->range_start
935 	 * in the middle of a PMD, the 'index' we use needs to be
936 	 * aligned to the start of the PMD.
937 	 * This allows us to flush for PMD_SIZE and not have to worry about
938 	 * partial PMD writebacks.
939 	 */
940 	pfn = dax_to_pfn(entry);
941 	count = 1UL << dax_entry_order(entry);
942 	index = xas->xa_index & ~(count - 1);
943 
944 	dax_entry_mkclean(mapping, index, pfn);
945 	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
946 	/*
947 	 * After we have flushed the cache, we can clear the dirty tag. There
948 	 * cannot be new dirty data in the pfn after the flush has completed as
949 	 * the pfn mappings are writeprotected and fault waits for mapping
950 	 * entry lock.
951 	 */
952 	xas_reset(xas);
953 	xas_lock_irq(xas);
954 	xas_store(xas, entry);
955 	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
956 	dax_wake_entry(xas, entry, WAKE_NEXT);
957 
958 	trace_dax_writeback_one(mapping->host, index, count);
959 	return ret;
960 
961  put_unlocked:
962 	put_unlocked_entry(xas, entry, WAKE_NEXT);
963 	return ret;
964 }
965 
966 /*
967  * Flush the mapping to the persistent domain within the byte range of [start,
968  * end]. This is required by data integrity operations to ensure file data is
969  * on persistent storage prior to completion of the operation.
970  */
971 int dax_writeback_mapping_range(struct address_space *mapping,
972 		struct dax_device *dax_dev, struct writeback_control *wbc)
973 {
974 	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
975 	struct inode *inode = mapping->host;
976 	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
977 	void *entry;
978 	int ret = 0;
979 	unsigned int scanned = 0;
980 
981 	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
982 		return -EIO;
983 
984 	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
985 		return 0;
986 
987 	trace_dax_writeback_range(inode, xas.xa_index, end_index);
988 
989 	tag_pages_for_writeback(mapping, xas.xa_index, end_index);
990 
991 	xas_lock_irq(&xas);
992 	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
993 		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
994 		if (ret < 0) {
995 			mapping_set_error(mapping, ret);
996 			break;
997 		}
998 		if (++scanned % XA_CHECK_SCHED)
999 			continue;
1000 
1001 		xas_pause(&xas);
1002 		xas_unlock_irq(&xas);
1003 		cond_resched();
1004 		xas_lock_irq(&xas);
1005 	}
1006 	xas_unlock_irq(&xas);
1007 	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
1008 	return ret;
1009 }
1010 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
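
/*
 * Filesystems with DAX inodes call this from their ->writepages(),
 * roughly as below (a sketch; fs_dax_device() stands in for however the
 * filesystem finds its dax_device):
 *
 *	static int fs_dax_writepages(struct address_space *mapping,
 *				     struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				fs_dax_device(mapping->host), wbc);
 *	}
 */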
1011 
1012 static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
1013 			 pfn_t *pfnp)
1014 {
1015 	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1016 	int id, rc;
1017 	long length;
1018 
1019 	id = dax_read_lock();
1020 	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1021 				   NULL, pfnp);
1022 	if (length < 0) {
1023 		rc = length;
1024 		goto out;
1025 	}
1026 	rc = -EINVAL;
1027 	if (PFN_PHYS(length) < size)
1028 		goto out;
1029 	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1030 		goto out;
1031 	/* For larger pages we need devmap */
1032 	if (length > 1 && !pfn_t_devmap(*pfnp))
1033 		goto out;
1034 	rc = 0;
1035 out:
1036 	dax_read_unlock(id);
1037 	return rc;
1038 }
1039 
1040 /*
1041  * The user has performed a load from a hole in the file.  Allocating a new
1042  * page in the file would cause excessive storage usage for workloads with
1043  * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
1044  * If this page is ever written to we will re-fault and change the mapping to
1045  * point to real DAX storage instead.
1046  */
1047 static vm_fault_t dax_load_hole(struct xa_state *xas,
1048 		struct address_space *mapping, void **entry,
1049 		struct vm_fault *vmf)
1050 {
1051 	struct inode *inode = mapping->host;
1052 	unsigned long vaddr = vmf->address;
1053 	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1054 	vm_fault_t ret;
1055 
1056 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1057 			DAX_ZERO_PAGE, false);
1058 
1059 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1060 	trace_dax_load_hole(inode, vmf, ret);
1061 	return ret;
1062 }
1063 
1064 #ifdef CONFIG_FS_DAX_PMD
1065 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1066 		const struct iomap *iomap, void **entry)
1067 {
1068 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1069 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1070 	struct vm_area_struct *vma = vmf->vma;
1071 	struct inode *inode = mapping->host;
1072 	pgtable_t pgtable = NULL;
1073 	struct page *zero_page;
1074 	spinlock_t *ptl;
1075 	pmd_t pmd_entry;
1076 	pfn_t pfn;
1077 
1078 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1079 
1080 	if (unlikely(!zero_page))
1081 		goto fallback;
1082 
1083 	pfn = page_to_pfn_t(zero_page);
1084 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1085 			DAX_PMD | DAX_ZERO_PAGE, false);
1086 
1087 	if (arch_needs_pgtable_deposit()) {
1088 		pgtable = pte_alloc_one(vma->vm_mm);
1089 		if (!pgtable)
1090 			return VM_FAULT_OOM;
1091 	}
1092 
1093 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1094 	if (!pmd_none(*(vmf->pmd))) {
1095 		spin_unlock(ptl);
1096 		goto fallback;
1097 	}
1098 
1099 	if (pgtable) {
1100 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1101 		mm_inc_nr_ptes(vma->vm_mm);
1102 	}
1103 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1104 	pmd_entry = pmd_mkhuge(pmd_entry);
1105 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1106 	spin_unlock(ptl);
1107 	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1108 	return VM_FAULT_NOPAGE;
1109 
1110 fallback:
1111 	if (pgtable)
1112 		pte_free(vma->vm_mm, pgtable);
1113 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1114 	return VM_FAULT_FALLBACK;
1115 }
1116 #else
1117 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1118 		const struct iomap *iomap, void **entry)
1119 {
1120 	return VM_FAULT_FALLBACK;
1121 }
1122 #endif /* CONFIG_FS_DAX_PMD */
1123 
1124 static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
1125 		unsigned int offset, size_t size)
1126 {
1127 	void *kaddr;
1128 	long ret;
1129 
1130 	ret = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
1131 	if (ret > 0) {
1132 		memset(kaddr + offset, 0, size);
1133 		dax_flush(dax_dev, kaddr + offset, size);
1134 	}
1135 	return ret;
1136 }
1137 
1138 static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
1139 {
1140 	const struct iomap *iomap = &iter->iomap;
1141 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1142 	loff_t pos = iter->pos;
1143 	u64 length = iomap_length(iter);
1144 	s64 written = 0;
1145 
1146 	/* already zeroed?  we're done. */
1147 	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1148 		return length;
1149 
1150 	do {
1151 		unsigned offset = offset_in_page(pos);
1152 		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
1153 		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1154 		long rc;
1155 		int id;
1156 
1157 		id = dax_read_lock();
1158 		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
1159 			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
1160 		else
1161 			rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
1162 		dax_read_unlock(id);
1163 
1164 		if (rc < 0)
1165 			return rc;
1166 		pos += size;
1167 		length -= size;
1168 		written += size;
1169 		if (did_zero)
1170 			*did_zero = true;
1171 	} while (length > 0);
1172 
1173 	return written;
1174 }
1175 
1176 int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1177 		const struct iomap_ops *ops)
1178 {
1179 	struct iomap_iter iter = {
1180 		.inode		= inode,
1181 		.pos		= pos,
1182 		.len		= len,
1183 		.flags		= IOMAP_ZERO,
1184 	};
1185 	int ret;
1186 
1187 	while ((ret = iomap_iter(&iter, ops)) > 0)
1188 		iter.processed = dax_zero_iter(&iter, did_zero);
1189 	return ret;
1190 }
1191 EXPORT_SYMBOL_GPL(dax_zero_range);
1192 
1193 int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1194 		const struct iomap_ops *ops)
1195 {
1196 	unsigned int blocksize = i_blocksize(inode);
1197 	unsigned int off = pos & (blocksize - 1);
1198 
1199 	/* Block boundary? Nothing to do */
1200 	if (!off)
1201 		return 0;
1202 	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
1203 }
1204 EXPORT_SYMBOL_GPL(dax_truncate_page);
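
/*
 * Example: with a 4096-byte block size and pos == 0x1200, 'off' is 0x200,
 * so the remaining 0xe00 bytes of that block are zeroed; for a block-aligned
 * pos the function is a no-op.
 */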
1205 
1206 static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
1207 		struct iov_iter *iter)
1208 {
1209 	const struct iomap *iomap = &iomi->iomap;
1210 	loff_t length = iomap_length(iomi);
1211 	loff_t pos = iomi->pos;
1212 	struct dax_device *dax_dev = iomap->dax_dev;
1213 	loff_t end = pos + length, done = 0;
1214 	ssize_t ret = 0;
1215 	size_t xfer;
1216 	int id;
1217 
1218 	if (iov_iter_rw(iter) == READ) {
1219 		end = min(end, i_size_read(iomi->inode));
1220 		if (pos >= end)
1221 			return 0;
1222 
1223 		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1224 			return iov_iter_zero(min(length, end - pos), iter);
1225 	}
1226 
1227 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1228 		return -EIO;
1229 
1230 	/*
1231 	 * A write can allocate a block for an area which has a hole page mapped
1232 	 * into the page tables. We have to tear down these mappings so that data
1233 	 * written by write(2) is visible in mmap.
1234 	 */
1235 	if (iomap->flags & IOMAP_F_NEW) {
1236 		invalidate_inode_pages2_range(iomi->inode->i_mapping,
1237 					      pos >> PAGE_SHIFT,
1238 					      (end - 1) >> PAGE_SHIFT);
1239 	}
1240 
1241 	id = dax_read_lock();
1242 	while (pos < end) {
1243 		unsigned offset = pos & (PAGE_SIZE - 1);
1244 		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1245 		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1246 		ssize_t map_len;
1247 		void *kaddr;
1248 
1249 		if (fatal_signal_pending(current)) {
1250 			ret = -EINTR;
1251 			break;
1252 		}
1253 
1254 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1255 				&kaddr, NULL);
1256 		if (map_len < 0) {
1257 			ret = map_len;
1258 			break;
1259 		}
1260 
1261 		map_len = PFN_PHYS(map_len);
1262 		kaddr += offset;
1263 		map_len -= offset;
1264 		if (map_len > end - pos)
1265 			map_len = end - pos;
1266 
1267 		/*
1268 		 * The userspace address for the memory copy has already been
1269 		 * validated via access_ok() in either vfs_read() or
1270 		 * vfs_write(), depending on which operation we are doing.
1271 		 */
1272 		if (iov_iter_rw(iter) == WRITE)
1273 			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1274 					map_len, iter);
1275 		else
1276 			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1277 					map_len, iter);
1278 
1279 		pos += xfer;
1280 		length -= xfer;
1281 		done += xfer;
1282 
1283 		if (xfer == 0)
1284 			ret = -EFAULT;
1285 		if (xfer < map_len)
1286 			break;
1287 	}
1288 	dax_read_unlock(id);
1289 
1290 	return done ? done : ret;
1291 }
1292 
1293 /**
1294  * dax_iomap_rw - Perform I/O to a DAX file
1295  * @iocb:	The control block for this I/O
1296  * @iter:	The addresses to do I/O from or to
1297  * @ops:	iomap ops passed from the file system
1298  *
1299  * This function performs read and write operations to directly mapped
1300  * persistent memory.  The caller needs to take care of read/write exclusion
1301  * and evicting any page cache pages in the region under I/O.
1302  */
1303 ssize_t
1304 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1305 		const struct iomap_ops *ops)
1306 {
1307 	struct iomap_iter iomi = {
1308 		.inode		= iocb->ki_filp->f_mapping->host,
1309 		.pos		= iocb->ki_pos,
1310 		.len		= iov_iter_count(iter),
1311 	};
1312 	loff_t done = 0;
1313 	int ret;
1314 
1315 	if (iov_iter_rw(iter) == WRITE) {
1316 		lockdep_assert_held_write(&iomi.inode->i_rwsem);
1317 		iomi.flags |= IOMAP_WRITE;
1318 	} else {
1319 		lockdep_assert_held(&iomi.inode->i_rwsem);
1320 	}
1321 
1322 	if (iocb->ki_flags & IOCB_NOWAIT)
1323 		iomi.flags |= IOMAP_NOWAIT;
1324 
1325 	while ((ret = iomap_iter(&iomi, ops)) > 0)
1326 		iomi.processed = dax_iomap_iter(&iomi, iter);
1327 
1328 	done = iomi.pos - iocb->ki_pos;
1329 	iocb->ki_pos = iomi.pos;
1330 	return done ? done : ret;
1331 }
1332 EXPORT_SYMBOL_GPL(dax_iomap_rw);
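
/*
 * A minimal read-side wrapper looks roughly like this (a sketch;
 * fs_iomap_ops stands in for the filesystem's own iomap_ops):
 *
 *	static ssize_t fs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &fs_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 *
 * which satisfies the i_rwsem assertion above for the read case.
 */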
1333 
1334 static vm_fault_t dax_fault_return(int error)
1335 {
1336 	if (error == 0)
1337 		return VM_FAULT_NOPAGE;
1338 	return vmf_error(error);
1339 }
1340 
1341 /*
1342  * MAP_SYNC on a dax mapping guarantees dirty metadata is
1343  * flushed on write-faults (non-cow), but not read-faults.
1344  */
1345 static bool dax_fault_is_synchronous(unsigned long flags,
1346 		struct vm_area_struct *vma, const struct iomap *iomap)
1347 {
1348 	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1349 		&& (iomap->flags & IOMAP_F_DIRTY);
1350 }
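
/*
 * Userspace opts in to synchronous faults with MAP_SYNC, e.g.:
 *
 *	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * With such a mapping, a write fault only completes once the metadata it
 * depends on is durable, so a CPU store plus cache flush is persistent
 * without a trailing fsync().
 */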
1351 
1352 /*
1353  * When handling a synchronous page fault and the inode needs an fsync, we
1354  * can insert the PTE/PMD into the page tables only after that fsync has
1355  * happened. Skip the insertion for now and return the pfn so that the
1356  * caller can insert it once the fsync is done.
1357  */
1358 static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
1359 {
1360 	if (WARN_ON_ONCE(!pfnp))
1361 		return VM_FAULT_SIGBUS;
1362 	*pfnp = pfn;
1363 	return VM_FAULT_NEEDDSYNC;
1364 }
1365 
1366 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
1367 		const struct iomap_iter *iter)
1368 {
1369 	vm_fault_t ret;
1370 	int error = 0;
1371 
1372 	switch (iter->iomap.type) {
1373 	case IOMAP_HOLE:
1374 	case IOMAP_UNWRITTEN:
1375 		clear_user_highpage(vmf->cow_page, vmf->address);
1376 		break;
1377 	case IOMAP_MAPPED:
1378 		error = copy_cow_page_dax(vmf, iter);
1379 		break;
1380 	default:
1381 		WARN_ON_ONCE(1);
1382 		error = -EIO;
1383 		break;
1384 	}
1385 
1386 	if (error)
1387 		return dax_fault_return(error);
1388 
1389 	__SetPageUptodate(vmf->cow_page);
1390 	ret = finish_fault(vmf);
1391 	if (!ret)
1392 		return VM_FAULT_DONE_COW;
1393 	return ret;
1394 }
1395 
1396 /**
1397  * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
1398  * @vmf:	vm fault instance
1399  * @iter:	iomap iter
1400  * @pfnp:	pfn to be returned
1401  * @xas:	the dax mapping tree of a file
1402  * @entry:	an unlocked dax entry to be inserted
1403  * @pmd:	distinguish whether it is a pmd fault
1404  */
1405 static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
1406 		const struct iomap_iter *iter, pfn_t *pfnp,
1407 		struct xa_state *xas, void **entry, bool pmd)
1408 {
1409 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1410 	const struct iomap *iomap = &iter->iomap;
1411 	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
1412 	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
1413 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1414 	bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap);
1415 	unsigned long entry_flags = pmd ? DAX_PMD : 0;
1416 	int err = 0;
1417 	pfn_t pfn;
1418 
1419 	if (!pmd && vmf->cow_page)
1420 		return dax_fault_cow_page(vmf, iter);
1421 
1422 	/* If we are reading UNWRITTEN or HOLE, return a hole. */
1423 	if (!write &&
1424 	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
1425 		if (!pmd)
1426 			return dax_load_hole(xas, mapping, entry, vmf);
1427 		return dax_pmd_load_hole(xas, vmf, iomap, entry);
1428 	}
1429 
1430 	if (iomap->type != IOMAP_MAPPED) {
1431 		WARN_ON_ONCE(1);
1432 		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
1433 	}
1434 
1435 	err = dax_iomap_pfn(&iter->iomap, pos, size, &pfn);
1436 	if (err)
1437 		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
1438 
1439 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
1440 				  write && !sync);
1441 
1442 	if (sync)
1443 		return dax_fault_synchronous_pfnp(pfnp, pfn);
1444 
1445 	/* insert PMD pfn */
1446 	if (pmd)
1447 		return vmf_insert_pfn_pmd(vmf, pfn, write);
1448 
1449 	/* insert PTE pfn */
1450 	if (write)
1451 		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1452 	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
1453 }
1454 
1455 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1456 			       int *iomap_errp, const struct iomap_ops *ops)
1457 {
1458 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1459 	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1460 	struct iomap_iter iter = {
1461 		.inode		= mapping->host,
1462 		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
1463 		.len		= PAGE_SIZE,
1464 		.flags		= IOMAP_FAULT,
1465 	};
1466 	vm_fault_t ret = 0;
1467 	void *entry;
1468 	int error;
1469 
1470 	trace_dax_pte_fault(iter.inode, vmf, ret);
1471 	/*
1472 	 * Check now whether the offset is beyond the end of file. The caller is
1473 	 * supposed to hold locks serializing us with truncate / punch hole, so
1474 	 * this is a reliable test.
1475 	 */
1476 	if (iter.pos >= i_size_read(iter.inode)) {
1477 		ret = VM_FAULT_SIGBUS;
1478 		goto out;
1479 	}
1480 
1481 	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1482 		iter.flags |= IOMAP_WRITE;
1483 
1484 	entry = grab_mapping_entry(&xas, mapping, 0);
1485 	if (xa_is_internal(entry)) {
1486 		ret = xa_to_internal(entry);
1487 		goto out;
1488 	}
1489 
1490 	/*
1491 	 * It is possible, particularly with mixed reads & writes to private
1492 	 * mappings, that we have raced with a PMD fault that overlaps with
1493 	 * the PTE we need to set up.  If so just return and the fault will be
1494 	 * retried.
1495 	 */
1496 	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1497 		ret = VM_FAULT_NOPAGE;
1498 		goto unlock_entry;
1499 	}
1500 
1501 	while ((error = iomap_iter(&iter, ops)) > 0) {
1502 		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
1503 			iter.processed = -EIO;	/* fs corruption? */
1504 			continue;
1505 		}
1506 
1507 		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
1508 		if (ret != VM_FAULT_SIGBUS &&
1509 		    (iter.iomap.flags & IOMAP_F_NEW)) {
1510 			count_vm_event(PGMAJFAULT);
1511 			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1512 			ret |= VM_FAULT_MAJOR;
1513 		}
1514 
1515 		if (!(ret & VM_FAULT_ERROR))
1516 			iter.processed = PAGE_SIZE;
1517 	}
1518 
1519 	if (iomap_errp)
1520 		*iomap_errp = error;
1521 	if (!ret && error)
1522 		ret = dax_fault_return(error);
1523 
1524 unlock_entry:
1525 	dax_unlock_entry(&xas, entry);
1526 out:
1527 	trace_dax_pte_fault_done(iter.inode, vmf, ret);
1528 	return ret;
1529 }
1530 
1531 #ifdef CONFIG_FS_DAX_PMD
1532 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
1533 		pgoff_t max_pgoff)
1534 {
1535 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1536 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1537 
1538 	/*
1539 	 * Make sure that the faulting address's PMD offset (color) matches
1540 	 * the PMD offset from the start of the file.  This is necessary so
1541 	 * that a PMD range in the page table overlaps exactly with a PMD
1542 	 * range in the page cache.
1543 	 */
1544 	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1545 	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1546 		return true;
1547 
1548 	/* Fall back to PTEs if we're going to COW */
1549 	if (write && !(vmf->vma->vm_flags & VM_SHARED))
1550 		return true;
1551 
1552 	/* If the PMD would extend outside the VMA */
1553 	if (pmd_addr < vmf->vma->vm_start)
1554 		return true;
1555 	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
1556 		return true;
1557 
1558 	/* If the PMD would extend beyond the file size */
1559 	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
1560 		return true;
1561 
1562 	return false;
1563 }
1564 
1565 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1566 			       const struct iomap_ops *ops)
1567 {
1568 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1569 	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1570 	struct iomap_iter iter = {
1571 		.inode		= mapping->host,
1572 		.len		= PMD_SIZE,
1573 		.flags		= IOMAP_FAULT,
1574 	};
1575 	vm_fault_t ret = VM_FAULT_FALLBACK;
1576 	pgoff_t max_pgoff;
1577 	void *entry;
1578 	int error;
1579 
1580 	if (vmf->flags & FAULT_FLAG_WRITE)
1581 		iter.flags |= IOMAP_WRITE;
1582 
1583 	/*
1584 	 * Check now whether the offset is beyond the end of file. The caller
1585 	 * is supposed to hold locks serializing us with truncate / punch hole,
1586 	 * so this is a reliable test.
1587 	 */
1588 	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
1589 
1590 	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
1591 
1592 	if (xas.xa_index >= max_pgoff) {
1593 		ret = VM_FAULT_SIGBUS;
1594 		goto out;
1595 	}
1596 
1597 	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
1598 		goto fallback;
1599 
1600 	/*
1601 	 * grab_mapping_entry() will make sure we get an empty PMD entry,
1602 	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
1603 	 * entry is already in the array, for instance), it will return
1604 	 * VM_FAULT_FALLBACK.
1605 	 */
1606 	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1607 	if (xa_is_internal(entry)) {
1608 		ret = xa_to_internal(entry);
1609 		goto fallback;
1610 	}
1611 
1612 	/*
1613 	 * It is possible, particularly with mixed reads & writes to private
1614 	 * mappings, that we have raced with a PTE fault that overlaps with
1615 	 * the PMD we need to set up.  If so just return and the fault will be
1616 	 * retried.
1617 	 */
1618 	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1619 			!pmd_devmap(*vmf->pmd)) {
1620 		ret = 0;
1621 		goto unlock_entry;
1622 	}
1623 
1624 	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1625 	while ((error = iomap_iter(&iter, ops)) > 0) {
1626 		if (iomap_length(&iter) < PMD_SIZE)
1627 			continue; /* actually breaks out of the loop */
1628 
1629 		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
1630 		if (ret != VM_FAULT_FALLBACK)
1631 			iter.processed = PMD_SIZE;
1632 	}
1633 
1634 unlock_entry:
1635 	dax_unlock_entry(&xas, entry);
1636 fallback:
1637 	if (ret == VM_FAULT_FALLBACK) {
1638 		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
1639 		count_vm_event(THP_FAULT_FALLBACK);
1640 	}
1641 out:
1642 	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
1643 	return ret;
1644 }
1645 #else
1646 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1647 			       const struct iomap_ops *ops)
1648 {
1649 	return VM_FAULT_FALLBACK;
1650 }
1651 #endif /* CONFIG_FS_DAX_PMD */
1652 
1653 /**
1654  * dax_iomap_fault - handle a page fault on a DAX file
1655  * @vmf: The description of the fault
1656  * @pe_size: Size of the page to fault in
1657  * @pfnp: PFN to insert for synchronous faults if fsync is required
1658  * @iomap_errp: Storage for detailed error code in case of error
1659  * @ops: Iomap ops passed from the file system
1660  *
1661  * When a page fault occurs, filesystems may call this helper in
1662  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1663  * has done all the necessary locking for page fault to proceed
1664  * successfully.
1665  */
1666 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1667 		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1668 {
1669 	switch (pe_size) {
1670 	case PE_SIZE_PTE:
1671 		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1672 	case PE_SIZE_PMD:
1673 		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1674 	default:
1675 		return VM_FAULT_FALLBACK;
1676 	}
1677 }
1678 EXPORT_SYMBOL_GPL(dax_iomap_fault);
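
/*
 * Typical wiring into a filesystem's vm_operations_struct (sketch;
 * fs_iomap_ops stands in for the filesystem's own iomap_ops):
 *
 *	static vm_fault_t fs_huge_fault(struct vm_fault *vmf,
 *					enum page_entry_size pe_size)
 *	{
 *		return dax_iomap_fault(vmf, pe_size, NULL, NULL,
 *				       &fs_iomap_ops);
 *	}
 */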
1679 
1680 /*
1681  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1682  * @vmf: The description of the fault
1683  * @pfn: PFN to insert
1684  * @order: Order of entry to insert.
1685  *
1686  * This function inserts a writeable PTE or PMD entry into the page tables
1687  * for an mmapped DAX file.  It also marks the page cache entry as dirty.
1688  */
1689 static vm_fault_t
1690 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1691 {
1692 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1693 	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1694 	void *entry;
1695 	vm_fault_t ret;
1696 
1697 	xas_lock_irq(&xas);
1698 	entry = get_unlocked_entry(&xas, order);
1699 	/* Did we race with someone splitting the entry, or similar? */
1700 	if (!entry || dax_is_conflict(entry) ||
1701 	    (order == 0 && !dax_is_pte_entry(entry))) {
1702 		put_unlocked_entry(&xas, entry, WAKE_NEXT);
1703 		xas_unlock_irq(&xas);
1704 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1705 						      VM_FAULT_NOPAGE);
1706 		return VM_FAULT_NOPAGE;
1707 	}
1708 	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1709 	dax_lock_entry(&xas, entry);
1710 	xas_unlock_irq(&xas);
1711 	if (order == 0)
1712 		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1713 #ifdef CONFIG_FS_DAX_PMD
1714 	else if (order == PMD_ORDER)
1715 		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1716 #endif
1717 	else
1718 		ret = VM_FAULT_FALLBACK;
1719 	dax_unlock_entry(&xas, entry);
1720 	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1721 	return ret;
1722 }
1723 
1724 /**
1725  * dax_finish_sync_fault - finish synchronous page fault
1726  * @vmf: The description of the fault
1727  * @pe_size: Size of entry to be inserted
1728  * @pfn: PFN to insert
1729  *
1730  * This function ensures that the file range touched by the page fault is
1731  * stored persistently on the media and handles insertion of the appropriate
1732  * page table entry.
1733  */
1734 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1735 		enum page_entry_size pe_size, pfn_t pfn)
1736 {
1737 	int err;
1738 	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1739 	unsigned int order = pe_order(pe_size);
1740 	size_t len = PAGE_SIZE << order;
1741 
1742 	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1743 	if (err)
1744 		return VM_FAULT_SIGBUS;
1745 	return dax_insert_pfn_mkwrite(vmf, pfn, order);
1746 }
1747 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
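
/*
 * For MAP_SYNC mappings the two-step sequence seen from a filesystem
 * fault handler is roughly (sketch; fs_iomap_ops is the filesystem's
 * iomap_ops):
 *
 *	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &fs_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 */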
1748