xref: /openbmc/linux/fs/dax.c (revision 7211ec63)
1 /*
2  * fs/dax.c - Direct Access filesystem code
3  * Copyright (c) 2013-2014 Intel Corporation
4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  */
16 
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/sched.h>
29 #include <linux/sched/signal.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34 #include <linux/mmu_notifier.h>
35 #include <linux/iomap.h>
36 #include "internal.h"
37 
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/fs_dax.h>
40 
41 /* We choose 4096 entries - same as per-zone page wait tables */
42 #define DAX_WAIT_TABLE_BITS 12
43 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
44 
45 /* The 'colour' (i.e. the low bits) of a page offset within a PMD.  */
46 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
47 
48 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
49 
50 static int __init init_dax_wait_table(void)
51 {
52 	int i;
53 
54 	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
55 		init_waitqueue_head(wait_table + i);
56 	return 0;
57 }
58 fs_initcall(init_dax_wait_table);
59 
60 /*
61  * We use the lowest available bit in an exceptional entry for locking, one
62  * bit for the entry size (PMD) and two more to tell us whether the entry is a
63  * zero page or an empty entry that is just used for locking - four special bits.
64  *
65  * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
66  * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
67  * block allocation.
68  */
69 #define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
70 #define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
71 #define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
72 #define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
73 #define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
74 
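/*
 * Helpers for encoding and decoding DAX radix tree entries: the sector is
 * stored in the bits above RADIX_DAX_SHIFT, the flag and lock bits below it.
 */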
75 static unsigned long dax_radix_sector(void *entry)
76 {
77 	return (unsigned long)entry >> RADIX_DAX_SHIFT;
78 }
79 
80 static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
81 {
82 	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
83 			((unsigned long)sector << RADIX_DAX_SHIFT) |
84 			RADIX_DAX_ENTRY_LOCK);
85 }
86 
87 static unsigned int dax_radix_order(void *entry)
88 {
89 	if ((unsigned long)entry & RADIX_DAX_PMD)
90 		return PMD_SHIFT - PAGE_SHIFT;
91 	return 0;
92 }
93 
94 static int dax_is_pmd_entry(void *entry)
95 {
96 	return (unsigned long)entry & RADIX_DAX_PMD;
97 }
98 
99 static int dax_is_pte_entry(void *entry)
100 {
101 	return !((unsigned long)entry & RADIX_DAX_PMD);
102 }
103 
104 static int dax_is_zero_entry(void *entry)
105 {
106 	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
107 }
108 
109 static int dax_is_empty_entry(void *entry)
110 {
111 	return (unsigned long)entry & RADIX_DAX_EMPTY;
112 }
113 
114 /*
115  * DAX radix tree locking
116  */
117 struct exceptional_entry_key {
118 	struct address_space *mapping;
119 	pgoff_t entry_start;
120 };
121 
122 struct wait_exceptional_entry_queue {
123 	wait_queue_entry_t wait;
124 	struct exceptional_entry_key key;
125 };
126 
127 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
128 		pgoff_t index, void *entry, struct exceptional_entry_key *key)
129 {
130 	unsigned long hash;
131 
132 	/*
133 	 * If 'entry' is a PMD, align the 'index' that we use for the wait
134 	 * queue to the start of that PMD.  This ensures that all offsets in
135 	 * the range covered by the PMD map to the same bit lock.
136 	 */
137 	if (dax_is_pmd_entry(entry))
138 		index &= ~PG_PMD_COLOUR;
139 
140 	key->mapping = mapping;
141 	key->entry_start = index;
142 
143 	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
144 	return wait_table + hash;
145 }
146 
147 static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
148 				       int sync, void *keyp)
149 {
150 	struct exceptional_entry_key *key = keyp;
151 	struct wait_exceptional_entry_queue *ewait =
152 		container_of(wait, struct wait_exceptional_entry_queue, wait);
153 
154 	if (key->mapping != ewait->key.mapping ||
155 	    key->entry_start != ewait->key.entry_start)
156 		return 0;
157 	return autoremove_wake_function(wait, mode, sync, NULL);
158 }
159 
160 /*
161  * We do not necessarily hold the mapping->tree_lock when we call this
162  * function so it is possible that 'entry' is no longer a valid item in the
163  * radix tree.  This is okay because all we really need to do is to find the
164  * correct waitqueue where tasks might be waiting for that old 'entry' and
165  * wake them.
166  */
167 static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
168 		pgoff_t index, void *entry, bool wake_all)
169 {
170 	struct exceptional_entry_key key;
171 	wait_queue_head_t *wq;
172 
173 	wq = dax_entry_waitqueue(mapping, index, entry, &key);
174 
175 	/*
176 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
177 	 * Checking for a locked entry and prepare_to_wait_exclusive() both happen
178 	 * under mapping->tree_lock; ditto for entry handling in our callers.
179 	 * must be in the waitqueue and the following check will see them.
180 	 */
181 	if (waitqueue_active(wq))
182 		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
183 }
184 
185 /*
186  * Check whether the given slot is locked. The function must be called with
187  * mapping->tree_lock held.
188  */
189 static inline int slot_locked(struct address_space *mapping, void **slot)
190 {
191 	unsigned long entry = (unsigned long)
192 		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
193 	return entry & RADIX_DAX_ENTRY_LOCK;
194 }
195 
196 /*
197  * Mark the given slot as locked. The function must be called with
198  * mapping->tree_lock held.
199  */
200 static inline void *lock_slot(struct address_space *mapping, void **slot)
201 {
202 	unsigned long entry = (unsigned long)
203 		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
204 
205 	entry |= RADIX_DAX_ENTRY_LOCK;
206 	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
207 	return (void *)entry;
208 }
209 
210 /*
211  * Mark the given slot as unlocked. The function must be called with
212  * mapping->tree_lock held.
213  */
214 static inline void *unlock_slot(struct address_space *mapping, void **slot)
215 {
216 	unsigned long entry = (unsigned long)
217 		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
218 
219 	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
220 	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
221 	return (void *)entry;
222 }
223 
224 /*
225  * Look up the entry in the radix tree, wait for it to become unlocked if it
226  * is a locked exceptional entry, and return it. The caller must call
227  * put_unlocked_mapping_entry() if it decides not to lock the entry, or
228  * put_locked_mapping_entry() once it has locked the entry and later wants to
229  * unlock it.
230  *
231  * The function must be called with mapping->tree_lock held.
232  */
233 static void *get_unlocked_mapping_entry(struct address_space *mapping,
234 					pgoff_t index, void ***slotp)
235 {
236 	void *entry, **slot;
237 	struct wait_exceptional_entry_queue ewait;
238 	wait_queue_head_t *wq;
239 
240 	init_wait(&ewait.wait);
241 	ewait.wait.func = wake_exceptional_entry_func;
242 
243 	for (;;) {
244 		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
245 					  &slot);
246 		if (!entry ||
247 		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
248 		    !slot_locked(mapping, slot)) {
249 			if (slotp)
250 				*slotp = slot;
251 			return entry;
252 		}
253 
254 		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
255 		prepare_to_wait_exclusive(wq, &ewait.wait,
256 					  TASK_UNINTERRUPTIBLE);
257 		spin_unlock_irq(&mapping->tree_lock);
258 		schedule();
259 		finish_wait(wq, &ewait.wait);
260 		spin_lock_irq(&mapping->tree_lock);
261 	}
262 }
263 
264 static void dax_unlock_mapping_entry(struct address_space *mapping,
265 				     pgoff_t index)
266 {
267 	void *entry, **slot;
268 
269 	spin_lock_irq(&mapping->tree_lock);
270 	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
271 	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
272 			 !slot_locked(mapping, slot))) {
273 		spin_unlock_irq(&mapping->tree_lock);
274 		return;
275 	}
276 	unlock_slot(mapping, slot);
277 	spin_unlock_irq(&mapping->tree_lock);
278 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
279 }
280 
281 static void put_locked_mapping_entry(struct address_space *mapping,
282 		pgoff_t index)
283 {
284 	dax_unlock_mapping_entry(mapping, index);
285 }
286 
287 /*
288  * Called when we are done with a radix tree entry we looked up via
289  * get_unlocked_mapping_entry() and which we didn't lock in the end.
290  */
291 static void put_unlocked_mapping_entry(struct address_space *mapping,
292 				       pgoff_t index, void *entry)
293 {
294 	if (!entry)
295 		return;
296 
297 	/* We have to wake up next waiter for the radix tree entry lock */
298 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
299 }
300 
301 /*
302  * Find the radix tree entry at the given index. If it points to an exceptional
303  * entry, return it with the radix tree entry locked. If the radix tree doesn't
304  * contain the given index, create an empty exceptional entry for the index and
305  * return it locked.
306  *
307  * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
308  * either return that locked entry or will return an error.  This error will
309  * happen if there are any 4k entries within the 2MiB range that we are
310  * requesting.
311  *
312  * We always favor 4k entries over 2MiB entries. There isn't a flow where we
313  * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
314  * insertion will fail if it finds any 4k entries already in the tree, and a
315  * 4k insertion will cause an existing 2MiB entry to be unmapped and
316  * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
317  * well as 2MiB empty entries.
318  *
319  * The exception to this downgrade path is for 2MiB DAX PMD entries that have
320  * real storage backing them.  We will leave these real 2MiB DAX entries in
321  * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
322  *
323  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
324  * persistent memory the benefit is doubtful. We can add that later if we can
325  * show it helps.
326  */
327 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
328 		unsigned long size_flag)
329 {
330 	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
331 	void *entry, **slot;
332 
333 restart:
334 	spin_lock_irq(&mapping->tree_lock);
335 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
336 
337 	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
338 		entry = ERR_PTR(-EIO);
339 		goto out_unlock;
340 	}
341 
342 	if (entry) {
343 		if (size_flag & RADIX_DAX_PMD) {
344 			if (dax_is_pte_entry(entry)) {
345 				put_unlocked_mapping_entry(mapping, index,
346 						entry);
347 				entry = ERR_PTR(-EEXIST);
348 				goto out_unlock;
349 			}
350 		} else { /* trying to grab a PTE entry */
351 			if (dax_is_pmd_entry(entry) &&
352 			    (dax_is_zero_entry(entry) ||
353 			     dax_is_empty_entry(entry))) {
354 				pmd_downgrade = true;
355 			}
356 		}
357 	}
358 
359 	/* No entry for given index? Make sure radix tree is big enough. */
360 	if (!entry || pmd_downgrade) {
361 		int err;
362 
363 		if (pmd_downgrade) {
364 			/*
365 			 * Make sure 'entry' remains valid while we drop
366 			 * mapping->tree_lock.
367 			 */
368 			entry = lock_slot(mapping, slot);
369 		}
370 
371 		spin_unlock_irq(&mapping->tree_lock);
372 		/*
373 		 * Besides huge zero pages the only other thing that gets
374 		 * downgraded are empty entries which don't need to be
375 		 * unmapped.
376 		 */
377 		if (pmd_downgrade && dax_is_zero_entry(entry))
378 			unmap_mapping_range(mapping,
379 				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
380 
381 		err = radix_tree_preload(
382 				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
383 		if (err) {
384 			if (pmd_downgrade)
385 				put_locked_mapping_entry(mapping, index);
386 			return ERR_PTR(err);
387 		}
388 		spin_lock_irq(&mapping->tree_lock);
389 
390 		if (!entry) {
391 			/*
392 			 * We needed to drop the page_tree lock while calling
393 			 * radix_tree_preload() and we didn't have an entry to
394 			 * lock.  See if another thread inserted an entry at
395 			 * our index during this time.
396 			 */
397 			entry = __radix_tree_lookup(&mapping->page_tree, index,
398 					NULL, &slot);
399 			if (entry) {
400 				radix_tree_preload_end();
401 				spin_unlock_irq(&mapping->tree_lock);
402 				goto restart;
403 			}
404 		}
405 
406 		if (pmd_downgrade) {
407 			radix_tree_delete(&mapping->page_tree, index);
408 			mapping->nrexceptional--;
409 			dax_wake_mapping_entry_waiter(mapping, index, entry,
410 					true);
411 		}
412 
413 		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
414 
415 		err = __radix_tree_insert(&mapping->page_tree, index,
416 				dax_radix_order(entry), entry);
417 		radix_tree_preload_end();
418 		if (err) {
419 			spin_unlock_irq(&mapping->tree_lock);
420 			/*
421 			 * Our insertion of a DAX entry failed, most likely
422 			 * because we were inserting a PMD entry and it
423 			 * collided with a PTE sized entry at a different
424 			 * index in the PMD range.  We haven't inserted
425 			 * anything into the radix tree and have no waiters to
426 			 * wake.
427 			 */
428 			return ERR_PTR(err);
429 		}
430 		/* Good, we have inserted empty locked entry into the tree. */
431 		mapping->nrexceptional++;
432 		spin_unlock_irq(&mapping->tree_lock);
433 		return entry;
434 	}
435 	entry = lock_slot(mapping, slot);
436  out_unlock:
437 	spin_unlock_irq(&mapping->tree_lock);
438 	return entry;
439 }
440 
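/*
 * Remove the DAX radix tree entry at @index, waiting for it to become
 * unlocked first.  When @trunc is false, entries that are dirty or tagged
 * for writeback are left in place so that they are not lost before a flush.
 */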
441 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
442 					  pgoff_t index, bool trunc)
443 {
444 	int ret = 0;
445 	void *entry;
446 	struct radix_tree_root *page_tree = &mapping->page_tree;
447 
448 	spin_lock_irq(&mapping->tree_lock);
449 	entry = get_unlocked_mapping_entry(mapping, index, NULL);
450 	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
451 		goto out;
452 	if (!trunc &&
453 	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
454 	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
455 		goto out;
456 	radix_tree_delete(page_tree, index);
457 	mapping->nrexceptional--;
458 	ret = 1;
459 out:
460 	put_unlocked_mapping_entry(mapping, index, entry);
461 	spin_unlock_irq(&mapping->tree_lock);
462 	return ret;
463 }
464 /*
465  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
466  * entry to get unlocked before deleting it.
467  */
468 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
469 {
470 	int ret = __dax_invalidate_mapping_entry(mapping, index, true);
471 
472 	/*
473 	 * This gets called from the truncate / punch_hole path. As such, the
474 	 * caller must hold locks protecting against concurrent modifications of
475 	 * the radix tree (usually the fs-private i_mmap_sem, held for writing).
476 	 * Since the caller has seen an exceptional entry for this index, we had
477 	 * better find it at that index as well...
478 	 */
479 	WARN_ON_ONCE(!ret);
480 	return ret;
481 }
482 
483 /*
484  * Invalidate exceptional DAX entry if it is clean.
485  */
486 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
487 				      pgoff_t index)
488 {
489 	return __dax_invalidate_mapping_entry(mapping, index, false);
490 }
491 
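/*
 * Copy @size bytes from DAX storage at @sector into the page cache page @to
 * (mapped at user address @vaddr); used to fill vmf->cow_page on write
 * faults of private mappings.
 */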
492 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
493 		sector_t sector, size_t size, struct page *to,
494 		unsigned long vaddr)
495 {
496 	void *vto, *kaddr;
497 	pgoff_t pgoff;
498 	pfn_t pfn;
499 	long rc;
500 	int id;
501 
502 	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
503 	if (rc)
504 		return rc;
505 
506 	id = dax_read_lock();
507 	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
508 	if (rc < 0) {
509 		dax_read_unlock(id);
510 		return rc;
511 	}
512 	vto = kmap_atomic(to);
513 	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
514 	kunmap_atomic(vto);
515 	dax_read_unlock(id);
516 	return 0;
517 }
518 
519 /*
520  * By this point grab_mapping_entry() has ensured that we have a locked entry
521  * of the appropriate size so we don't have to worry about downgrading PMDs to
522  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
523  * already in the tree, we will skip the insertion and just dirty the PMD as
524  * appropriate.
525  */
526 static void *dax_insert_mapping_entry(struct address_space *mapping,
527 				      struct vm_fault *vmf,
528 				      void *entry, sector_t sector,
529 				      unsigned long flags)
530 {
531 	struct radix_tree_root *page_tree = &mapping->page_tree;
532 	void *new_entry;
533 	pgoff_t index = vmf->pgoff;
534 
535 	if (vmf->flags & FAULT_FLAG_WRITE)
536 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
537 
538 	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
539 		/* we are replacing a zero page with a block mapping */
540 		if (dax_is_pmd_entry(entry))
541 			unmap_mapping_range(mapping,
542 					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
543 					PMD_SIZE, 0);
544 		else /* pte entry */
545 			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
546 					PAGE_SIZE, 0);
547 	}
548 
549 	spin_lock_irq(&mapping->tree_lock);
550 	new_entry = dax_radix_locked_entry(sector, flags);
551 
552 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
553 		/*
554 		 * Only swap our new entry into the radix tree if the current
555 		 * entry is a zero page or an empty entry.  If a normal PTE or
556 		 * PMD entry is already in the tree, we leave it alone.  This
557 		 * means that if we are trying to insert a PTE and the
558 		 * existing entry is a PMD, we will just leave the PMD in the
559 		 * tree and dirty it if necessary.
560 		 */
561 		struct radix_tree_node *node;
562 		void **slot;
563 		void *ret;
564 
565 		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
566 		WARN_ON_ONCE(ret != entry);
567 		__radix_tree_replace(page_tree, node, slot,
568 				     new_entry, NULL, NULL);
569 		entry = new_entry;
570 	}
571 
572 	if (vmf->flags & FAULT_FLAG_WRITE)
573 		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
574 
575 	spin_unlock_irq(&mapping->tree_lock);
576 	return entry;
577 }
578 
579 static inline unsigned long
580 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
581 {
582 	unsigned long address;
583 
584 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
585 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
586 	return address;
587 }
588 
589 /* Walk all mappings of a given index of a file and writeprotect them */
590 static void dax_mapping_entry_mkclean(struct address_space *mapping,
591 				      pgoff_t index, unsigned long pfn)
592 {
593 	struct vm_area_struct *vma;
594 	pte_t pte, *ptep = NULL;
595 	pmd_t *pmdp = NULL;
596 	spinlock_t *ptl;
597 
598 	i_mmap_lock_read(mapping);
599 	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
600 		unsigned long address, start, end;
601 
602 		cond_resched();
603 
604 		if (!(vma->vm_flags & VM_SHARED))
605 			continue;
606 
607 		address = pgoff_address(index, vma);
608 
609 		/*
610 		 * Note because we provide start/end to follow_pte_pmd it will
611 		 * call mmu_notifier_invalidate_range_start() on our behalf
612 		 * before taking any lock.
613 		 */
614 		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
615 			continue;
616 
617 		if (pmdp) {
618 #ifdef CONFIG_FS_DAX_PMD
619 			pmd_t pmd;
620 
621 			if (pfn != pmd_pfn(*pmdp))
622 				goto unlock_pmd;
623 			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
624 				goto unlock_pmd;
625 
626 			flush_cache_page(vma, address, pfn);
627 			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
628 			pmd = pmd_wrprotect(pmd);
629 			pmd = pmd_mkclean(pmd);
630 			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
631 			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
632 unlock_pmd:
633 			spin_unlock(ptl);
634 #endif
635 		} else {
636 			if (pfn != pte_pfn(*ptep))
637 				goto unlock_pte;
638 			if (!pte_dirty(*ptep) && !pte_write(*ptep))
639 				goto unlock_pte;
640 
641 			flush_cache_page(vma, address, pfn);
642 			pte = ptep_clear_flush(vma, address, ptep);
643 			pte = pte_wrprotect(pte);
644 			pte = pte_mkclean(pte);
645 			set_pte_at(vma->vm_mm, address, ptep, pte);
646 			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
647 unlock_pte:
648 			pte_unmap_unlock(ptep, ptl);
649 		}
650 
651 		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
652 	}
653 	i_mmap_unlock_read(mapping);
654 }
655 
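/*
 * Flush a single dirty DAX radix tree entry: write-protect all userspace
 * mappings of the pfn, flush the CPU caches for the backing range and then
 * clear the dirty tag so a later write fault re-dirties the entry.
 */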
656 static int dax_writeback_one(struct block_device *bdev,
657 		struct dax_device *dax_dev, struct address_space *mapping,
658 		pgoff_t index, void *entry)
659 {
660 	struct radix_tree_root *page_tree = &mapping->page_tree;
661 	void *entry2, **slot, *kaddr;
662 	long ret = 0, id;
663 	sector_t sector;
664 	pgoff_t pgoff;
665 	size_t size;
666 	pfn_t pfn;
667 
668 	/*
669 	 * A page got tagged dirty in DAX mapping? Something is seriously
670 	 * wrong.
671 	 */
672 	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
673 		return -EIO;
674 
675 	spin_lock_irq(&mapping->tree_lock);
676 	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
677 	/* Entry got punched out / reallocated? */
678 	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
679 		goto put_unlocked;
680 	/*
681 	 * Entry got reallocated elsewhere? No need to write it back. We have to
682 	 * compare sectors as we must not bail out due to a difference in the lock
683 	 * bit or entry type.
684 	 */
685 	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
686 		goto put_unlocked;
687 	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
688 				dax_is_zero_entry(entry))) {
689 		ret = -EIO;
690 		goto put_unlocked;
691 	}
692 
693 	/* Another fsync thread may have already written back this entry */
694 	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
695 		goto put_unlocked;
696 	/* Lock the entry to serialize with page faults */
697 	entry = lock_slot(mapping, slot);
698 	/*
699 	 * We can clear the tag now but we have to be careful so that concurrent
700 	 * dax_writeback_one() calls for the same index cannot finish before we
701 	 * actually flush the caches. This works because such calls look at the
702 	 * entry only under tree_lock, where they will see the entry locked and
703 	 * wait for it to be unlocked.
704 	 */
705 	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
706 	spin_unlock_irq(&mapping->tree_lock);
707 
708 	/*
709 	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
710 	 * in the middle of a PMD, the 'index' we are given will be aligned to
711 	 * the start index of the PMD, as will the sector we pull from
712 	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
713 	 * worry about partial PMD writebacks.
714 	 */
715 	sector = dax_radix_sector(entry);
716 	size = PAGE_SIZE << dax_radix_order(entry);
717 
718 	id = dax_read_lock();
719 	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
720 	if (ret)
721 		goto dax_unlock;
722 
723 	/*
724 	 * dax_direct_access() may sleep, so we cannot hold tree_lock over
725 	 * its invocation.
726 	 */
727 	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
728 	if (ret < 0)
729 		goto dax_unlock;
730 
731 	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
732 		ret = -EIO;
733 		goto dax_unlock;
734 	}
735 
736 	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
737 	dax_flush(dax_dev, kaddr, size);
738 	/*
739 	 * After we have flushed the cache, we can clear the dirty tag. There
740 	 * cannot be new dirty data in the pfn after the flush has completed as
741 	 * the pfn mappings are writeprotected and fault waits for mapping
742 	 * entry lock.
743 	 */
744 	spin_lock_irq(&mapping->tree_lock);
745 	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
746 	spin_unlock_irq(&mapping->tree_lock);
747 	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
748  dax_unlock:
749 	dax_read_unlock(id);
750 	put_locked_mapping_entry(mapping, index);
751 	return ret;
752 
753  put_unlocked:
754 	put_unlocked_mapping_entry(mapping, index, entry2);
755 	spin_unlock_irq(&mapping->tree_lock);
756 	return ret;
757 }
758 
759 /*
760  * Flush the mapping to the persistent domain within the byte range of [start,
761  * end]. This is required by data integrity operations to ensure file data is
762  * on persistent storage prior to completion of the operation.
763  */
764 int dax_writeback_mapping_range(struct address_space *mapping,
765 		struct block_device *bdev, struct writeback_control *wbc)
766 {
767 	struct inode *inode = mapping->host;
768 	pgoff_t start_index, end_index;
769 	pgoff_t indices[PAGEVEC_SIZE];
770 	struct dax_device *dax_dev;
771 	struct pagevec pvec;
772 	bool done = false;
773 	int i, ret = 0;
774 
775 	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
776 		return -EIO;
777 
778 	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
779 		return 0;
780 
781 	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
782 	if (!dax_dev)
783 		return -EIO;
784 
785 	start_index = wbc->range_start >> PAGE_SHIFT;
786 	end_index = wbc->range_end >> PAGE_SHIFT;
787 
788 	trace_dax_writeback_range(inode, start_index, end_index);
789 
790 	tag_pages_for_writeback(mapping, start_index, end_index);
791 
792 	pagevec_init(&pvec, 0);
793 	while (!done) {
794 		pvec.nr = find_get_entries_tag(mapping, start_index,
795 				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
796 				pvec.pages, indices);
797 
798 		if (pvec.nr == 0)
799 			break;
800 
801 		for (i = 0; i < pvec.nr; i++) {
802 			if (indices[i] > end_index) {
803 				done = true;
804 				break;
805 			}
806 
807 			ret = dax_writeback_one(bdev, dax_dev, mapping,
808 					indices[i], pvec.pages[i]);
809 			if (ret < 0) {
810 				mapping_set_error(mapping, ret);
811 				goto out;
812 			}
813 		}
814 		start_index = indices[pvec.nr - 1] + 1;
815 	}
816 out:
817 	put_dax(dax_dev);
818 	trace_dax_writeback_range_done(inode, start_index, end_index);
819 	return (ret < 0 ? ret : 0);
820 }
821 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
822 
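/*
 * Install a PTE for the pfn backing @sector and record the corresponding
 * entry in the radix tree.  This is the normal (non-CoW, non-hole) PTE
 * fault path.
 */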
823 static int dax_insert_mapping(struct address_space *mapping,
824 		struct block_device *bdev, struct dax_device *dax_dev,
825 		sector_t sector, size_t size, void *entry,
826 		struct vm_area_struct *vma, struct vm_fault *vmf)
827 {
828 	unsigned long vaddr = vmf->address;
829 	void *ret, *kaddr;
830 	pgoff_t pgoff;
831 	int id, rc;
832 	pfn_t pfn;
833 
834 	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
835 	if (rc)
836 		return rc;
837 
838 	id = dax_read_lock();
839 	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
840 	if (rc < 0) {
841 		dax_read_unlock(id);
842 		return rc;
843 	}
844 	dax_read_unlock(id);
845 
846 	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
847 	if (IS_ERR(ret))
848 		return PTR_ERR(ret);
849 
850 	trace_dax_insert_mapping(mapping->host, vmf, ret);
851 	if (vmf->flags & FAULT_FLAG_WRITE)
852 		return vm_insert_mixed_mkwrite(vma, vaddr, pfn);
853 	else
854 		return vm_insert_mixed(vma, vaddr, pfn);
855 }
856 
857 /*
858  * The user has performed a load from a hole in the file.  Allocating a new
859  * page in the file would cause excessive storage usage for workloads with
860  * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
861  * If this page is ever written to we will re-fault and change the mapping to
862  * point to real DAX storage instead.
863  */
864 static int dax_load_hole(struct address_space *mapping, void *entry,
865 			 struct vm_fault *vmf)
866 {
867 	struct inode *inode = mapping->host;
868 	unsigned long vaddr = vmf->address;
869 	int ret = VM_FAULT_NOPAGE;
870 	struct page *zero_page;
871 	void *entry2;
872 
873 	zero_page = ZERO_PAGE(0);
874 	if (unlikely(!zero_page)) {
875 		ret = VM_FAULT_OOM;
876 		goto out;
877 	}
878 
879 	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
880 			RADIX_DAX_ZERO_PAGE);
881 	if (IS_ERR(entry2)) {
882 		ret = VM_FAULT_SIGBUS;
883 		goto out;
884 	}
885 
886 	vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
887 out:
888 	trace_dax_load_hole(inode, vmf, ret);
889 	return ret;
890 }
891 
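/*
 * Return true if both @offset and @length are aligned to the logical block
 * size of @bdev, i.e. the range can be zeroed through the block layer.
 */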
892 static bool dax_range_is_aligned(struct block_device *bdev,
893 				 unsigned int offset, unsigned int length)
894 {
895 	unsigned short sector_size = bdev_logical_block_size(bdev);
896 
897 	if (!IS_ALIGNED(offset, sector_size))
898 		return false;
899 	if (!IS_ALIGNED(length, sector_size))
900 		return false;
901 
902 	return true;
903 }
904 
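/*
 * Zero a sub-page range: via blkdev_issue_zeroout() when the range is
 * aligned to the device's logical blocks, otherwise by memset() through
 * dax_direct_access() followed by a cache flush.
 */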
905 int __dax_zero_page_range(struct block_device *bdev,
906 		struct dax_device *dax_dev, sector_t sector,
907 		unsigned int offset, unsigned int size)
908 {
909 	if (dax_range_is_aligned(bdev, offset, size)) {
910 		sector_t start_sector = sector + (offset >> 9);
911 
912 		return blkdev_issue_zeroout(bdev, start_sector,
913 				size >> 9, GFP_NOFS, 0);
914 	} else {
915 		pgoff_t pgoff;
916 		long rc, id;
917 		void *kaddr;
918 		pfn_t pfn;
919 
920 		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
921 		if (rc)
922 			return rc;
923 
924 		id = dax_read_lock();
925 		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
926 				&pfn);
927 		if (rc < 0) {
928 			dax_read_unlock(id);
929 			return rc;
930 		}
931 		memset(kaddr + offset, 0, size);
932 		dax_flush(dax_dev, kaddr + offset, size);
933 		dax_read_unlock(id);
934 	}
935 	return 0;
936 }
937 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
938 
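/* Convert a position within the iomap extent to a 512-byte sector number. */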
939 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
940 {
941 	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
942 }
943 
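/*
 * The iomap actor used by dax_iomap_rw(): copy data between @data (the
 * iov_iter) and directly mapped persistent memory for [pos, pos + length),
 * one contiguous mapping at a time.
 */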
944 static loff_t
945 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
946 		struct iomap *iomap)
947 {
948 	struct block_device *bdev = iomap->bdev;
949 	struct dax_device *dax_dev = iomap->dax_dev;
950 	struct iov_iter *iter = data;
951 	loff_t end = pos + length, done = 0;
952 	ssize_t ret = 0;
953 	int id;
954 
955 	if (iov_iter_rw(iter) == READ) {
956 		end = min(end, i_size_read(inode));
957 		if (pos >= end)
958 			return 0;
959 
960 		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
961 			return iov_iter_zero(min(length, end - pos), iter);
962 	}
963 
964 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
965 		return -EIO;
966 
967 	/*
968 	 * A write can allocate a block for an area which has a hole page mapped
969 	 * into the page tables. We have to tear down these mappings so that data
970 	 * written by write(2) is visible via mmap.
971 	 */
972 	if (iomap->flags & IOMAP_F_NEW) {
973 		invalidate_inode_pages2_range(inode->i_mapping,
974 					      pos >> PAGE_SHIFT,
975 					      (end - 1) >> PAGE_SHIFT);
976 	}
977 
978 	id = dax_read_lock();
979 	while (pos < end) {
980 		unsigned offset = pos & (PAGE_SIZE - 1);
981 		const size_t size = ALIGN(length + offset, PAGE_SIZE);
982 		const sector_t sector = dax_iomap_sector(iomap, pos);
983 		ssize_t map_len;
984 		pgoff_t pgoff;
985 		void *kaddr;
986 		pfn_t pfn;
987 
988 		if (fatal_signal_pending(current)) {
989 			ret = -EINTR;
990 			break;
991 		}
992 
993 		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
994 		if (ret)
995 			break;
996 
997 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
998 				&kaddr, &pfn);
999 		if (map_len < 0) {
1000 			ret = map_len;
1001 			break;
1002 		}
1003 
1004 		map_len = PFN_PHYS(map_len);
1005 		kaddr += offset;
1006 		map_len -= offset;
1007 		if (map_len > end - pos)
1008 			map_len = end - pos;
1009 
1010 		/*
1011 		 * The userspace address for the memory copy has already been
1012 		 * validated via access_ok() in either vfs_read() or
1013 		 * vfs_write(), depending on which operation we are doing.
1014 		 */
1015 		if (iov_iter_rw(iter) == WRITE)
1016 			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1017 					map_len, iter);
1018 		else
1019 			map_len = copy_to_iter(kaddr, map_len, iter);
1020 		if (map_len <= 0) {
1021 			ret = map_len ? map_len : -EFAULT;
1022 			break;
1023 		}
1024 
1025 		pos += map_len;
1026 		length -= map_len;
1027 		done += map_len;
1028 	}
1029 	dax_read_unlock(id);
1030 
1031 	return done ? done : ret;
1032 }
1033 
1034 /**
1035  * dax_iomap_rw - Perform I/O to a DAX file
1036  * @iocb:	The control block for this I/O
1037  * @iter:	The addresses to do I/O from or to
1038  * @ops:	iomap ops passed from the file system
1039  *
1040  * This function performs read and write operations to directly mapped
1041  * persistent memory.  The caller needs to take care of read/write exclusion
1042  * and evicting any page cache pages in the region under I/O.
1043  */
1044 ssize_t
1045 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1046 		const struct iomap_ops *ops)
1047 {
1048 	struct address_space *mapping = iocb->ki_filp->f_mapping;
1049 	struct inode *inode = mapping->host;
1050 	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1051 	unsigned flags = 0;
1052 
1053 	if (iov_iter_rw(iter) == WRITE) {
1054 		lockdep_assert_held_exclusive(&inode->i_rwsem);
1055 		flags |= IOMAP_WRITE;
1056 	} else {
1057 		lockdep_assert_held(&inode->i_rwsem);
1058 	}
1059 
1060 	while (iov_iter_count(iter)) {
1061 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1062 				iter, dax_iomap_actor);
1063 		if (ret <= 0)
1064 			break;
1065 		pos += ret;
1066 		done += ret;
1067 	}
1068 
1069 	iocb->ki_pos += done;
1070 	return done ? done : ret;
1071 }
1072 EXPORT_SYMBOL_GPL(dax_iomap_rw);
1073 
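/* Translate an errno from the fault path into a VM_FAULT_* return code. */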
1074 static int dax_fault_return(int error)
1075 {
1076 	if (error == 0)
1077 		return VM_FAULT_NOPAGE;
1078 	if (error == -ENOMEM)
1079 		return VM_FAULT_OOM;
1080 	return VM_FAULT_SIGBUS;
1081 }
1082 
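
/*
 * Handle a PTE-sized DAX fault: look up the block backing vmf->pgoff via
 * ->iomap_begin() and then install either a zero page (read from a hole),
 * a CoW copy, or a direct mapping of the backing pfn.
 */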
1083 static int dax_iomap_pte_fault(struct vm_fault *vmf,
1084 			       const struct iomap_ops *ops)
1085 {
1086 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1087 	struct inode *inode = mapping->host;
1088 	unsigned long vaddr = vmf->address;
1089 	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1090 	sector_t sector;
1091 	struct iomap iomap = { 0 };
1092 	unsigned flags = IOMAP_FAULT;
1093 	int error, major = 0;
1094 	int vmf_ret = 0;
1095 	void *entry;
1096 
1097 	trace_dax_pte_fault(inode, vmf, vmf_ret);
1098 	/*
1099 	 * Check now whether the offset is beyond the end of the file. The caller
1100 	 * is supposed to hold locks serializing us with truncate / punch hole, so
1101 	 * this is a reliable test.
1102 	 */
1103 	if (pos >= i_size_read(inode)) {
1104 		vmf_ret = VM_FAULT_SIGBUS;
1105 		goto out;
1106 	}
1107 
1108 	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1109 		flags |= IOMAP_WRITE;
1110 
1111 	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1112 	if (IS_ERR(entry)) {
1113 		vmf_ret = dax_fault_return(PTR_ERR(entry));
1114 		goto out;
1115 	}
1116 
1117 	/*
1118 	 * It is possible, particularly with mixed reads & writes to private
1119 	 * mappings, that we have raced with a PMD fault that overlaps with
1120 	 * the PTE we need to set up.  If so just return and the fault will be
1121 	 * retried.
1122 	 */
1123 	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1124 		vmf_ret = VM_FAULT_NOPAGE;
1125 		goto unlock_entry;
1126 	}
1127 
1128 	/*
1129 	 * Note that we don't bother to use iomap_apply here: DAX requires
1130 	 * the filesystem block size to be equal to the page size, which means
1131 	 * that we never have to deal with more than a single extent here.
1132 	 */
1133 	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1134 	if (error) {
1135 		vmf_ret = dax_fault_return(error);
1136 		goto unlock_entry;
1137 	}
1138 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1139 		error = -EIO;	/* fs corruption? */
1140 		goto error_finish_iomap;
1141 	}
1142 
1143 	sector = dax_iomap_sector(&iomap, pos);
1144 
1145 	if (vmf->cow_page) {
1146 		switch (iomap.type) {
1147 		case IOMAP_HOLE:
1148 		case IOMAP_UNWRITTEN:
1149 			clear_user_highpage(vmf->cow_page, vaddr);
1150 			break;
1151 		case IOMAP_MAPPED:
1152 			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1153 					sector, PAGE_SIZE, vmf->cow_page, vaddr);
1154 			break;
1155 		default:
1156 			WARN_ON_ONCE(1);
1157 			error = -EIO;
1158 			break;
1159 		}
1160 
1161 		if (error)
1162 			goto error_finish_iomap;
1163 
1164 		__SetPageUptodate(vmf->cow_page);
1165 		vmf_ret = finish_fault(vmf);
1166 		if (!vmf_ret)
1167 			vmf_ret = VM_FAULT_DONE_COW;
1168 		goto finish_iomap;
1169 	}
1170 
1171 	switch (iomap.type) {
1172 	case IOMAP_MAPPED:
1173 		if (iomap.flags & IOMAP_F_NEW) {
1174 			count_vm_event(PGMAJFAULT);
1175 			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1176 			major = VM_FAULT_MAJOR;
1177 		}
1178 		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
1179 				sector, PAGE_SIZE, entry, vmf->vma, vmf);
1180 		/* -EBUSY is fine, somebody else faulted on the same PTE */
1181 		if (error == -EBUSY)
1182 			error = 0;
1183 		break;
1184 	case IOMAP_UNWRITTEN:
1185 	case IOMAP_HOLE:
1186 		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1187 			vmf_ret = dax_load_hole(mapping, entry, vmf);
1188 			goto finish_iomap;
1189 		}
1190 		/*FALLTHRU*/
1191 	default:
1192 		WARN_ON_ONCE(1);
1193 		error = -EIO;
1194 		break;
1195 	}
1196 
1197  error_finish_iomap:
1198 	vmf_ret = dax_fault_return(error) | major;
1199  finish_iomap:
1200 	if (ops->iomap_end) {
1201 		int copied = PAGE_SIZE;
1202 
1203 		if (vmf_ret & VM_FAULT_ERROR)
1204 			copied = 0;
1205 		/*
1206 		 * The fault is done by now and there's no way back (another
1207 		 * thread may already be happily using the PTE we have installed).
1208 		 * Just ignore errors from ->iomap_end since we cannot do much
1209 		 * with them.
1210 		 */
1211 		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1212 	}
1213  unlock_entry:
1214 	put_locked_mapping_entry(mapping, vmf->pgoff);
1215  out:
1216 	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
1217 	return vmf_ret;
1218 }
1219 
1220 #ifdef CONFIG_FS_DAX_PMD
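/*
 * Try to install a PMD-sized direct mapping for the 2MiB region backing
 * @pos.  Returns VM_FAULT_FALLBACK unless dax_direct_access() provides at
 * least PMD_SIZE of contiguous, PMD-aligned, device-mapped memory.
 */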
1221 static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
1222 		loff_t pos, void *entry)
1223 {
1224 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1225 	const sector_t sector = dax_iomap_sector(iomap, pos);
1226 	struct dax_device *dax_dev = iomap->dax_dev;
1227 	struct block_device *bdev = iomap->bdev;
1228 	struct inode *inode = mapping->host;
1229 	const size_t size = PMD_SIZE;
1230 	void *ret = NULL, *kaddr;
1231 	long length = 0;
1232 	pgoff_t pgoff;
1233 	pfn_t pfn = {};
1234 	int id;
1235 
1236 	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
1237 		goto fallback;
1238 
1239 	id = dax_read_lock();
1240 	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
1241 	if (length < 0)
1242 		goto unlock_fallback;
1243 	length = PFN_PHYS(length);
1244 
1245 	if (length < size)
1246 		goto unlock_fallback;
1247 	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
1248 		goto unlock_fallback;
1249 	if (!pfn_t_devmap(pfn))
1250 		goto unlock_fallback;
1251 	dax_read_unlock(id);
1252 
1253 	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector,
1254 			RADIX_DAX_PMD);
1255 	if (IS_ERR(ret))
1256 		goto fallback;
1257 
1258 	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
1259 	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1260 			pfn, vmf->flags & FAULT_FLAG_WRITE);
1261 
1262 unlock_fallback:
1263 	dax_read_unlock(id);
1264 fallback:
1265 	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
1266 	return VM_FAULT_FALLBACK;
1267 }
1268 
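/*
 * Handle a PMD read fault over a file hole by mapping the shared huge zero
 * page and recording a RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE entry.
 */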
1269 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1270 		void *entry)
1271 {
1272 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1273 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1274 	struct inode *inode = mapping->host;
1275 	struct page *zero_page;
1276 	void *ret = NULL;
1277 	spinlock_t *ptl;
1278 	pmd_t pmd_entry;
1279 
1280 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1281 
1282 	if (unlikely(!zero_page))
1283 		goto fallback;
1284 
1285 	ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
1286 			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE);
1287 	if (IS_ERR(ret))
1288 		goto fallback;
1289 
1290 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1291 	if (!pmd_none(*(vmf->pmd))) {
1292 		spin_unlock(ptl);
1293 		goto fallback;
1294 	}
1295 
1296 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1297 	pmd_entry = pmd_mkhuge(pmd_entry);
1298 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1299 	spin_unlock(ptl);
1300 	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1301 	return VM_FAULT_NOPAGE;
1302 
1303 fallback:
1304 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1305 	return VM_FAULT_FALLBACK;
1306 }
1307 
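/*
 * Handle a PMD-sized DAX fault.  A series of alignment, size and sharing
 * checks cause a fall back to PTE faults (VM_FAULT_FALLBACK) whenever a
 * 2MiB mapping cannot be used.
 */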
1308 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1309 			       const struct iomap_ops *ops)
1310 {
1311 	struct vm_area_struct *vma = vmf->vma;
1312 	struct address_space *mapping = vma->vm_file->f_mapping;
1313 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1314 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1315 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1316 	struct inode *inode = mapping->host;
1317 	int result = VM_FAULT_FALLBACK;
1318 	struct iomap iomap = { 0 };
1319 	pgoff_t max_pgoff, pgoff;
1320 	void *entry;
1321 	loff_t pos;
1322 	int error;
1323 
1324 	/*
1325 	 * Check now whether the offset is beyond the end of the file. The
1326 	 * caller is supposed to hold locks serializing us with truncate /
1327 	 * punch hole, so this is a reliable test.
1328 	 */
1329 	pgoff = linear_page_index(vma, pmd_addr);
1330 	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
1331 
1332 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1333 
1334 	/*
1335 	 * Make sure that the faulting address's PMD offset (color) matches
1336 	 * the PMD offset from the start of the file.  This is necessary so
1337 	 * that a PMD range in the page table overlaps exactly with a PMD
1338 	 * range in the radix tree.
1339 	 */
1340 	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1341 	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1342 		goto fallback;
1343 
1344 	/* Fall back to PTEs if we're going to COW */
1345 	if (write && !(vma->vm_flags & VM_SHARED))
1346 		goto fallback;
1347 
1348 	/* If the PMD would extend outside the VMA */
1349 	if (pmd_addr < vma->vm_start)
1350 		goto fallback;
1351 	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1352 		goto fallback;
1353 
1354 	if (pgoff > max_pgoff) {
1355 		result = VM_FAULT_SIGBUS;
1356 		goto out;
1357 	}
1358 
1359 	/* If the PMD would extend beyond the file size */
1360 	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
1361 		goto fallback;
1362 
1363 	/*
1364 	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
1365 	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
1366 	 * is already in the tree, for instance), it will return -EEXIST and
1367 	 * we just fall back to 4k entries.
1368 	 */
1369 	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1370 	if (IS_ERR(entry))
1371 		goto fallback;
1372 
1373 	/*
1374 	 * It is possible, particularly with mixed reads & writes to private
1375 	 * mappings, that we have raced with a PTE fault that overlaps with
1376 	 * the PMD we need to set up.  If so just return and the fault will be
1377 	 * retried.
1378 	 */
1379 	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1380 			!pmd_devmap(*vmf->pmd)) {
1381 		result = 0;
1382 		goto unlock_entry;
1383 	}
1384 
1385 	/*
1386 	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1387 	 * setting up a mapping, so really we're using iomap_begin() as a way
1388 	 * to look up our filesystem block.
1389 	 */
1390 	pos = (loff_t)pgoff << PAGE_SHIFT;
1391 	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1392 	if (error)
1393 		goto unlock_entry;
1394 
1395 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
1396 		goto finish_iomap;
1397 
1398 	switch (iomap.type) {
1399 	case IOMAP_MAPPED:
1400 		result = dax_pmd_insert_mapping(vmf, &iomap, pos, entry);
1401 		break;
1402 	case IOMAP_UNWRITTEN:
1403 	case IOMAP_HOLE:
1404 		if (WARN_ON_ONCE(write))
1405 			break;
1406 		result = dax_pmd_load_hole(vmf, &iomap, entry);
1407 		break;
1408 	default:
1409 		WARN_ON_ONCE(1);
1410 		break;
1411 	}
1412 
1413  finish_iomap:
1414 	if (ops->iomap_end) {
1415 		int copied = PMD_SIZE;
1416 
1417 		if (result == VM_FAULT_FALLBACK)
1418 			copied = 0;
1419 		/*
1420 		 * The fault is done by now and there's no way back (another
1421 		 * thread may already be happily using the PMD we have installed).
1422 		 * Just ignore errors from ->iomap_end since we cannot do much
1423 		 * with them.
1424 		 */
1425 		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1426 				&iomap);
1427 	}
1428  unlock_entry:
1429 	put_locked_mapping_entry(mapping, pgoff);
1430  fallback:
1431 	if (result == VM_FAULT_FALLBACK) {
1432 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1433 		count_vm_event(THP_FAULT_FALLBACK);
1434 	}
1435 out:
1436 	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1437 	return result;
1438 }
1439 #else
1440 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1441 			       const struct iomap_ops *ops)
1442 {
1443 	return VM_FAULT_FALLBACK;
1444 }
1445 #endif /* CONFIG_FS_DAX_PMD */
1446 
1447 /**
1448  * dax_iomap_fault - handle a page fault on a DAX file
1449  * @vmf: The description of the fault
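 * @pe_size: Size of the page to fault in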
1450  * @ops: iomap ops passed from the file system
1451  *
1452  * When a page fault occurs, filesystems may call this helper in
1453  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1454  * has done all the necessary locking for the page fault to proceed
1455  * successfully.
1456  */
1457 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1458 		    const struct iomap_ops *ops)
1459 {
1460 	switch (pe_size) {
1461 	case PE_SIZE_PTE:
1462 		return dax_iomap_pte_fault(vmf, ops);
1463 	case PE_SIZE_PMD:
1464 		return dax_iomap_pmd_fault(vmf, ops);
1465 	default:
1466 		return VM_FAULT_FALLBACK;
1467 	}
1468 }
1469 EXPORT_SYMBOL_GPL(dax_iomap_fault);
1470