1 /*
2  * fs/dax.c - Direct Access filesystem code
3  * Copyright (c) 2013-2014 Intel Corporation
4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  */
16 
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/pmem.h>
29 #include <linux/sched.h>
30 #include <linux/sched/signal.h>
31 #include <linux/uio.h>
32 #include <linux/vmstat.h>
33 #include <linux/pfn_t.h>
34 #include <linux/sizes.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/iomap.h>
37 #include "internal.h"
38 
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/fs_dax.h>
41 
42 /* We choose 4096 entries - same as per-zone page wait tables */
43 #define DAX_WAIT_TABLE_BITS 12
44 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
45 
46 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
47 
48 static int __init init_dax_wait_table(void)
49 {
50 	int i;
51 
52 	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
53 		init_waitqueue_head(wait_table + i);
54 	return 0;
55 }
56 fs_initcall(init_dax_wait_table);
57 
58 static int dax_is_pmd_entry(void *entry)
59 {
60 	return (unsigned long)entry & RADIX_DAX_PMD;
61 }
62 
63 static int dax_is_pte_entry(void *entry)
64 {
65 	return !((unsigned long)entry & RADIX_DAX_PMD);
66 }
67 
68 static int dax_is_zero_entry(void *entry)
69 {
70 	return (unsigned long)entry & RADIX_DAX_HZP;
71 }
72 
73 static int dax_is_empty_entry(void *entry)
74 {
75 	return (unsigned long)entry & RADIX_DAX_EMPTY;
76 }
77 
78 /*
79  * DAX radix tree locking
80  */
81 struct exceptional_entry_key {
82 	struct address_space *mapping;
83 	pgoff_t entry_start;
84 };
85 
86 struct wait_exceptional_entry_queue {
87 	wait_queue_t wait;
88 	struct exceptional_entry_key key;
89 };
90 
91 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
92 		pgoff_t index, void *entry, struct exceptional_entry_key *key)
93 {
94 	unsigned long hash;
95 
96 	/*
97 	 * If 'entry' is a PMD, align the 'index' that we use for the wait
98 	 * queue to the start of that PMD.  This ensures that all offsets in
99 	 * the range covered by the PMD map to the same bit lock.
100 	 */
101 	if (dax_is_pmd_entry(entry))
102 		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
103 
104 	key->mapping = mapping;
105 	key->entry_start = index;
106 
107 	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
108 	return wait_table + hash;
109 }
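/*
 * Worked example (illustrative only, assuming 4 KiB pages and 2 MiB PMDs,
 * so PMD_SHIFT - PAGE_SHIFT == 9): a PMD entry covers 512 consecutive page
 * offsets, and the masking above folds any index in that range onto the
 * first one before hashing:
 *
 *	index = 0x3a7;
 *	index &= ~((1UL << 9) - 1);	now 0x200
 *	hash = hash_long((unsigned long)mapping ^ 0x200, DAX_WAIT_TABLE_BITS);
 *
 * so every waiter for any offset inside that PMD ends up on the same one of
 * the 4096 wait queues.
 */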
110 
111 static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
112 				       int sync, void *keyp)
113 {
114 	struct exceptional_entry_key *key = keyp;
115 	struct wait_exceptional_entry_queue *ewait =
116 		container_of(wait, struct wait_exceptional_entry_queue, wait);
117 
118 	if (key->mapping != ewait->key.mapping ||
119 	    key->entry_start != ewait->key.entry_start)
120 		return 0;
121 	return autoremove_wake_function(wait, mode, sync, NULL);
122 }
123 
124 /*
125  * Check whether the given slot is locked. The function must be called with
126  * mapping->tree_lock held
127  */
128 static inline int slot_locked(struct address_space *mapping, void **slot)
129 {
130 	unsigned long entry = (unsigned long)
131 		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
132 	return entry & RADIX_DAX_ENTRY_LOCK;
133 }
134 
135 /*
136  * Mark the given slot as locked. The function must be called with
137  * mapping->tree_lock held
138  */
139 static inline void *lock_slot(struct address_space *mapping, void **slot)
140 {
141 	unsigned long entry = (unsigned long)
142 		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
143 
144 	entry |= RADIX_DAX_ENTRY_LOCK;
145 	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
146 	return (void *)entry;
147 }
148 
149 /*
150  * Mark the given slot as unlocked. The function must be called with
151  * mapping->tree_lock held
152  */
153 static inline void *unlock_slot(struct address_space *mapping, void **slot)
154 {
155 	unsigned long entry = (unsigned long)
156 		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
157 
158 	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
159 	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
160 	return (void *)entry;
161 }
162 
163 /*
164  * Look up an entry in the radix tree and, if it is an exceptional entry,
165  * wait for it to become unlocked before returning it. The caller must call
166  * put_unlocked_mapping_entry() if it decided not to lock the entry or
167  * put_locked_mapping_entry() if it locked the entry and now wants to
168  * unlock it.
169  *
170  * The function must be called with mapping->tree_lock held.
171  */
172 static void *get_unlocked_mapping_entry(struct address_space *mapping,
173 					pgoff_t index, void ***slotp)
174 {
175 	void *entry, **slot;
176 	struct wait_exceptional_entry_queue ewait;
177 	wait_queue_head_t *wq;
178 
179 	init_wait(&ewait.wait);
180 	ewait.wait.func = wake_exceptional_entry_func;
181 
182 	for (;;) {
183 		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
184 					  &slot);
185 		if (!entry || !radix_tree_exceptional_entry(entry) ||
186 		    !slot_locked(mapping, slot)) {
187 			if (slotp)
188 				*slotp = slot;
189 			return entry;
190 		}
191 
192 		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
193 		prepare_to_wait_exclusive(wq, &ewait.wait,
194 					  TASK_UNINTERRUPTIBLE);
195 		spin_unlock_irq(&mapping->tree_lock);
196 		schedule();
197 		finish_wait(wq, &ewait.wait);
198 		spin_lock_irq(&mapping->tree_lock);
199 	}
200 }
201 
202 static void dax_unlock_mapping_entry(struct address_space *mapping,
203 				     pgoff_t index)
204 {
205 	void *entry, **slot;
206 
207 	spin_lock_irq(&mapping->tree_lock);
208 	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
209 	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
210 			 !slot_locked(mapping, slot))) {
211 		spin_unlock_irq(&mapping->tree_lock);
212 		return;
213 	}
214 	unlock_slot(mapping, slot);
215 	spin_unlock_irq(&mapping->tree_lock);
216 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
217 }
218 
219 static void put_locked_mapping_entry(struct address_space *mapping,
220 				     pgoff_t index, void *entry)
221 {
222 	if (!radix_tree_exceptional_entry(entry)) {
223 		unlock_page(entry);
224 		put_page(entry);
225 	} else {
226 		dax_unlock_mapping_entry(mapping, index);
227 	}
228 }
229 
230 /*
231  * Called when we are done with the radix tree entry we looked up via
232  * get_unlocked_mapping_entry() and which we didn't lock in the end.
233  */
234 static void put_unlocked_mapping_entry(struct address_space *mapping,
235 				       pgoff_t index, void *entry)
236 {
237 	if (!radix_tree_exceptional_entry(entry))
238 		return;
239 
240 	/* We have to wake up next waiter for the radix tree entry lock */
241 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
242 }
243 
244 /*
245  * Find radix tree entry at given index. If it points to a page, return with
246  * the page locked. If it points to an exceptional entry, return with the
247  * radix tree entry locked. If the radix tree doesn't contain the given index,
248  * create an empty exceptional entry for the index and return with it locked.
249  *
250  * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
251  * either return that locked entry or will return an error.  This error will
252  * happen if there are any 4k entries (either zero pages or DAX entries)
253  * within the 2MiB range that we are requesting.
254  *
255  * We always favor 4k entries over 2MiB entries. There isn't a flow where we
256  * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
257  * insertion will fail if it finds any 4k entries already in the tree, and a
258  * 4k insertion will cause an existing 2MiB entry to be unmapped and
259  * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
260  * well as 2MiB empty entries.
261  *
262  * The exception to this downgrade path is for 2MiB DAX PMD entries that have
263  * real storage backing them.  We will leave these real 2MiB DAX entries in
264  * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
265  *
266  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
267  * persistent memory the benefit is doubtful. We can add that later if we can
268  * show it helps.
269  */
270 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
271 		unsigned long size_flag)
272 {
273 	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
274 	void *entry, **slot;
275 
276 restart:
277 	spin_lock_irq(&mapping->tree_lock);
278 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
279 
280 	if (entry) {
281 		if (size_flag & RADIX_DAX_PMD) {
282 			if (!radix_tree_exceptional_entry(entry) ||
283 			    dax_is_pte_entry(entry)) {
284 				put_unlocked_mapping_entry(mapping, index,
285 						entry);
286 				entry = ERR_PTR(-EEXIST);
287 				goto out_unlock;
288 			}
289 		} else { /* trying to grab a PTE entry */
290 			if (radix_tree_exceptional_entry(entry) &&
291 			    dax_is_pmd_entry(entry) &&
292 			    (dax_is_zero_entry(entry) ||
293 			     dax_is_empty_entry(entry))) {
294 				pmd_downgrade = true;
295 			}
296 		}
297 	}
298 
299 	/* No entry for given index? Make sure radix tree is big enough. */
300 	if (!entry || pmd_downgrade) {
301 		int err;
302 
303 		if (pmd_downgrade) {
304 			/*
305 			 * Make sure 'entry' remains valid while we drop
306 			 * mapping->tree_lock.
307 			 */
308 			entry = lock_slot(mapping, slot);
309 		}
310 
311 		spin_unlock_irq(&mapping->tree_lock);
312 		/*
313 		 * Besides huge zero pages, the only other things that get
314 		 * downgraded are empty entries, which don't need to be
315 		 * unmapped.
316 		 */
317 		if (pmd_downgrade && dax_is_zero_entry(entry))
318 			unmap_mapping_range(mapping,
319 				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
320 
321 		err = radix_tree_preload(
322 				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
323 		if (err) {
324 			if (pmd_downgrade)
325 				put_locked_mapping_entry(mapping, index, entry);
326 			return ERR_PTR(err);
327 		}
328 		spin_lock_irq(&mapping->tree_lock);
329 
330 		if (!entry) {
331 			/*
332 			 * We needed to drop the page_tree lock while calling
333 			 * radix_tree_preload() and we didn't have an entry to
334 			 * lock.  See if another thread inserted an entry at
335 			 * our index during this time.
336 			 */
337 			entry = __radix_tree_lookup(&mapping->page_tree, index,
338 					NULL, &slot);
339 			if (entry) {
340 				radix_tree_preload_end();
341 				spin_unlock_irq(&mapping->tree_lock);
342 				goto restart;
343 			}
344 		}
345 
346 		if (pmd_downgrade) {
347 			radix_tree_delete(&mapping->page_tree, index);
348 			mapping->nrexceptional--;
349 			dax_wake_mapping_entry_waiter(mapping, index, entry,
350 					true);
351 		}
352 
353 		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
354 
355 		err = __radix_tree_insert(&mapping->page_tree, index,
356 				dax_radix_order(entry), entry);
357 		radix_tree_preload_end();
358 		if (err) {
359 			spin_unlock_irq(&mapping->tree_lock);
360 			/*
361 			 * Our insertion of a DAX entry failed, most likely
362 			 * because we were inserting a PMD entry and it
363 			 * collided with a PTE sized entry at a different
364 			 * index in the PMD range.  We haven't inserted
365 			 * anything into the radix tree and have no waiters to
366 			 * wake.
367 			 */
368 			return ERR_PTR(err);
369 		}
370 		/* Good, we have inserted empty locked entry into the tree. */
371 		mapping->nrexceptional++;
372 		spin_unlock_irq(&mapping->tree_lock);
373 		return entry;
374 	}
375 	/* Normal page in radix tree? */
376 	if (!radix_tree_exceptional_entry(entry)) {
377 		struct page *page = entry;
378 
379 		get_page(page);
380 		spin_unlock_irq(&mapping->tree_lock);
381 		lock_page(page);
382 		/* Page got truncated? Retry... */
383 		if (unlikely(page->mapping != mapping)) {
384 			unlock_page(page);
385 			put_page(page);
386 			goto restart;
387 		}
388 		return page;
389 	}
390 	entry = lock_slot(mapping, slot);
391  out_unlock:
392 	spin_unlock_irq(&mapping->tree_lock);
393 	return entry;
394 }
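/*
 * Usage sketch (both callers appear later in this file): the PTE fault path
 * asks for any entry, while the PMD fault path asks for a 2MiB entry and
 * treats -EEXIST as "fall back to PTEs":
 *
 *	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
 *
 *	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
 *	if (IS_ERR(entry))
 *		goto fallback;
 *
 * In either case the returned entry is locked and must eventually be
 * released with put_locked_mapping_entry().
 */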
395 
396 /*
397  * We do not necessarily hold the mapping->tree_lock when we call this
398  * function so it is possible that 'entry' is no longer a valid item in the
399  * radix tree.  This is okay because all we really need to do is to find the
400  * correct waitqueue where tasks might be waiting for that old 'entry' and
401  * wake them.
402  */
403 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
404 		pgoff_t index, void *entry, bool wake_all)
405 {
406 	struct exceptional_entry_key key;
407 	wait_queue_head_t *wq;
408 
409 	wq = dax_entry_waitqueue(mapping, index, entry, &key);
410 
411 	/*
412 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
413 	 * under mapping->tree_lock, ditto for entry handling in our callers.
414 	 * So at this point all tasks that could have seen our entry locked
415 	 * must be in the waitqueue and the following check will see them.
416 	 */
417 	if (waitqueue_active(wq))
418 		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
419 }
420 
421 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
422 					  pgoff_t index, bool trunc)
423 {
424 	int ret = 0;
425 	void *entry;
426 	struct radix_tree_root *page_tree = &mapping->page_tree;
427 
428 	spin_lock_irq(&mapping->tree_lock);
429 	entry = get_unlocked_mapping_entry(mapping, index, NULL);
430 	if (!entry || !radix_tree_exceptional_entry(entry))
431 		goto out;
432 	if (!trunc &&
433 	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
434 	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
435 		goto out;
436 	radix_tree_delete(page_tree, index);
437 	mapping->nrexceptional--;
438 	ret = 1;
439 out:
440 	put_unlocked_mapping_entry(mapping, index, entry);
441 	spin_unlock_irq(&mapping->tree_lock);
442 	return ret;
443 }
444 /*
445  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
446  * entry to get unlocked before deleting it.
447  */
448 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
449 {
450 	int ret = __dax_invalidate_mapping_entry(mapping, index, true);
451 
452 	/*
453 	 * This gets called from the truncate / punch_hole path. As such, the caller
454 	 * must hold locks protecting against concurrent modifications of the
455 	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
456 	 * caller has seen an exceptional entry for this index, we had better find it
457 	 * at that index as well...
458 	 */
459 	WARN_ON_ONCE(!ret);
460 	return ret;
461 }
462 
463 /*
464  * Invalidate exceptional DAX entry if it is clean.
465  */
466 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
467 				      pgoff_t index)
468 {
469 	return __dax_invalidate_mapping_entry(mapping, index, false);
470 }
471 
472 /*
473  * The user has performed a load from a hole in the file.  Allocating
474  * a new page in the file would cause excessive storage usage for
475  * workloads with sparse files.  We allocate a page cache page instead.
476  * We'll kick it out of the page cache if it's ever written to,
477  * otherwise it will simply fall out of the page cache under memory
478  * pressure without ever having been dirtied.
479  */
480 static int dax_load_hole(struct address_space *mapping, void **entry,
481 			 struct vm_fault *vmf)
482 {
483 	struct inode *inode = mapping->host;
484 	struct page *page;
485 	int ret;
486 
487 	/* Hole page already exists? Return it...  */
488 	if (!radix_tree_exceptional_entry(*entry)) {
489 		page = *entry;
490 		goto finish_fault;
491 	}
492 
493 	/* This will replace locked radix tree entry with a hole page */
494 	page = find_or_create_page(mapping, vmf->pgoff,
495 				   vmf->gfp_mask | __GFP_ZERO);
496 	if (!page) {
497 		ret = VM_FAULT_OOM;
498 		goto out;
499 	}
500 
501 finish_fault:
502 	vmf->page = page;
503 	ret = finish_fault(vmf);
504 	vmf->page = NULL;
505 	*entry = page;
506 	if (!ret) {
507 		/* Grab reference for PTE that is now referencing the page */
508 		get_page(page);
509 		ret = VM_FAULT_NOPAGE;
510 	}
511 out:
512 	trace_dax_load_hole(inode, vmf, ret);
513 	return ret;
514 }
515 
516 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
517 		sector_t sector, size_t size, struct page *to,
518 		unsigned long vaddr)
519 {
520 	void *vto, *kaddr;
521 	pgoff_t pgoff;
522 	pfn_t pfn;
523 	long rc;
524 	int id;
525 
526 	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
527 	if (rc)
528 		return rc;
529 
530 	id = dax_read_lock();
531 	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
532 	if (rc < 0) {
533 		dax_read_unlock(id);
534 		return rc;
535 	}
536 	vto = kmap_atomic(to);
537 	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
538 	kunmap_atomic(vto);
539 	dax_read_unlock(id);
540 	return 0;
541 }
542 
543 /*
544  * By this point grab_mapping_entry() has ensured that we have a locked entry
545  * of the appropriate size so we don't have to worry about downgrading PMDs to
546  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
547  * already in the tree, we will skip the insertion and just dirty the PMD as
548  * appropriate.
549  */
550 static void *dax_insert_mapping_entry(struct address_space *mapping,
551 				      struct vm_fault *vmf,
552 				      void *entry, sector_t sector,
553 				      unsigned long flags)
554 {
555 	struct radix_tree_root *page_tree = &mapping->page_tree;
556 	int error = 0;
557 	bool hole_fill = false;
558 	void *new_entry;
559 	pgoff_t index = vmf->pgoff;
560 
561 	if (vmf->flags & FAULT_FLAG_WRITE)
562 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
563 
564 	/* Replacing hole page with block mapping? */
565 	if (!radix_tree_exceptional_entry(entry)) {
566 		hole_fill = true;
567 		/*
568 		 * Unmap the page now before we remove it from page cache below.
569 		 * The page is locked so it cannot be faulted in again.
570 		 */
571 		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
572 				    PAGE_SIZE, 0);
573 		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
574 		if (error)
575 			return ERR_PTR(error);
576 	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
577 		/* replacing huge zero page with PMD block mapping */
578 		unmap_mapping_range(mapping,
579 			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
580 	}
581 
582 	spin_lock_irq(&mapping->tree_lock);
583 	new_entry = dax_radix_locked_entry(sector, flags);
584 
585 	if (hole_fill) {
586 		__delete_from_page_cache(entry, NULL);
587 		/* Drop pagecache reference */
588 		put_page(entry);
589 		error = __radix_tree_insert(page_tree, index,
590 				dax_radix_order(new_entry), new_entry);
591 		if (error) {
592 			new_entry = ERR_PTR(error);
593 			goto unlock;
594 		}
595 		mapping->nrexceptional++;
596 	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
597 		/*
598 		 * Only swap our new entry into the radix tree if the current
599 		 * entry is a zero page or an empty entry.  If a normal PTE or
600 		 * PMD entry is already in the tree, we leave it alone.  This
601 		 * means that if we are trying to insert a PTE and the
602 		 * existing entry is a PMD, we will just leave the PMD in the
603 		 * tree and dirty it if necessary.
604 		 */
605 		struct radix_tree_node *node;
606 		void **slot;
607 		void *ret;
608 
609 		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
610 		WARN_ON_ONCE(ret != entry);
611 		__radix_tree_replace(page_tree, node, slot,
612 				     new_entry, NULL, NULL);
613 	}
614 	if (vmf->flags & FAULT_FLAG_WRITE)
615 		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
616  unlock:
617 	spin_unlock_irq(&mapping->tree_lock);
618 	if (hole_fill) {
619 		radix_tree_preload_end();
620 		/*
621 		 * We don't need the hole page anymore, it has been replaced with
622 		 * a locked radix tree entry now.
623 		 */
624 		if (mapping->a_ops->freepage)
625 			mapping->a_ops->freepage(entry);
626 		unlock_page(entry);
627 		put_page(entry);
628 	}
629 	return new_entry;
630 }
631 
632 static inline unsigned long
633 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
634 {
635 	unsigned long address;
636 
637 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
638 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
639 	return address;
640 }
641 
642 /* Walk all mappings of a given index of a file and writeprotect them */
643 static void dax_mapping_entry_mkclean(struct address_space *mapping,
644 				      pgoff_t index, unsigned long pfn)
645 {
646 	struct vm_area_struct *vma;
647 	pte_t pte, *ptep = NULL;
648 	pmd_t *pmdp = NULL;
649 	spinlock_t *ptl;
650 	bool changed;
651 
652 	i_mmap_lock_read(mapping);
653 	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
654 		unsigned long address;
655 
656 		cond_resched();
657 
658 		if (!(vma->vm_flags & VM_SHARED))
659 			continue;
660 
661 		address = pgoff_address(index, vma);
662 		changed = false;
663 		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
664 			continue;
665 
666 		if (pmdp) {
667 #ifdef CONFIG_FS_DAX_PMD
668 			pmd_t pmd;
669 
670 			if (pfn != pmd_pfn(*pmdp))
671 				goto unlock_pmd;
672 			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
673 				goto unlock_pmd;
674 
675 			flush_cache_page(vma, address, pfn);
676 			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
677 			pmd = pmd_wrprotect(pmd);
678 			pmd = pmd_mkclean(pmd);
679 			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
680 			changed = true;
681 unlock_pmd:
682 			spin_unlock(ptl);
683 #endif
684 		} else {
685 			if (pfn != pte_pfn(*ptep))
686 				goto unlock_pte;
687 			if (!pte_dirty(*ptep) && !pte_write(*ptep))
688 				goto unlock_pte;
689 
690 			flush_cache_page(vma, address, pfn);
691 			pte = ptep_clear_flush(vma, address, ptep);
692 			pte = pte_wrprotect(pte);
693 			pte = pte_mkclean(pte);
694 			set_pte_at(vma->vm_mm, address, ptep, pte);
695 			changed = true;
696 unlock_pte:
697 			pte_unmap_unlock(ptep, ptl);
698 		}
699 
700 		if (changed)
701 			mmu_notifier_invalidate_page(vma->vm_mm, address);
702 	}
703 	i_mmap_unlock_read(mapping);
704 }
705 
706 static int dax_writeback_one(struct block_device *bdev,
707 		struct dax_device *dax_dev, struct address_space *mapping,
708 		pgoff_t index, void *entry)
709 {
710 	struct radix_tree_root *page_tree = &mapping->page_tree;
711 	void *entry2, **slot, *kaddr;
712 	long ret = 0, id;
713 	sector_t sector;
714 	pgoff_t pgoff;
715 	size_t size;
716 	pfn_t pfn;
717 
718 	/*
719 	 * A page got tagged dirty in DAX mapping? Something is seriously
720 	 * wrong.
721 	 */
722 	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
723 		return -EIO;
724 
725 	spin_lock_irq(&mapping->tree_lock);
726 	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
727 	/* Entry got punched out / reallocated? */
728 	if (!entry2 || !radix_tree_exceptional_entry(entry2))
729 		goto put_unlocked;
730 	/*
731 	 * Entry got reallocated elsewhere? No need to write it back. We have to
732 	 * compare sectors as we must not bail out due to a difference in lock bit
733 	 * or entry type.
734 	 */
735 	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
736 		goto put_unlocked;
737 	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
738 				dax_is_zero_entry(entry))) {
739 		ret = -EIO;
740 		goto put_unlocked;
741 	}
742 
743 	/* Another fsync thread may have already written back this entry */
744 	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
745 		goto put_unlocked;
746 	/* Lock the entry to serialize with page faults */
747 	entry = lock_slot(mapping, slot);
748 	/*
749 	 * We can clear the tag now but we have to be careful so that concurrent
750 	 * dax_writeback_one() calls for the same index cannot finish before we
751 	 * actually flush the caches. This is achieved as the calls will look
752 	 * at the entry only under tree_lock and once they do that they will
753 	 * see the entry locked and wait for it to unlock.
754 	 */
755 	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
756 	spin_unlock_irq(&mapping->tree_lock);
757 
758 	/*
759 	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
760 	 * in the middle of a PMD, the 'index' we are given will be aligned to
761 	 * the start index of the PMD, as will the sector we pull from
762 	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
763 	 * worry about partial PMD writebacks.
764 	 */
765 	sector = dax_radix_sector(entry);
766 	size = PAGE_SIZE << dax_radix_order(entry);
767 
768 	id = dax_read_lock();
769 	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
770 	if (ret)
771 		goto dax_unlock;
772 
773 	/*
774 	 * dax_direct_access() may sleep, so cannot hold tree_lock over
775 	 * its invocation.
776 	 */
777 	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
778 	if (ret < 0)
779 		goto dax_unlock;
780 
781 	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
782 		ret = -EIO;
783 		goto dax_unlock;
784 	}
785 
786 	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
787 	wb_cache_pmem(kaddr, size);
788 	/*
789 	 * After we have flushed the cache, we can clear the dirty tag. There
790 	 * cannot be new dirty data in the pfn after the flush has completed as
791 	 * the pfn mappings are writeprotected and fault waits for mapping
792 	 * entry lock.
793 	 */
794 	spin_lock_irq(&mapping->tree_lock);
795 	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
796 	spin_unlock_irq(&mapping->tree_lock);
797 	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
798  dax_unlock:
799 	dax_read_unlock(id);
800 	put_locked_mapping_entry(mapping, index, entry);
801 	return ret;
802 
803  put_unlocked:
804 	put_unlocked_mapping_entry(mapping, index, entry2);
805 	spin_unlock_irq(&mapping->tree_lock);
806 	return ret;
807 }
808 
809 /*
810  * Flush the mapping to the persistent domain within the byte range of [start,
811  * end]. This is required by data integrity operations to ensure file data is
812  * on persistent storage prior to completion of the operation.
813  */
814 int dax_writeback_mapping_range(struct address_space *mapping,
815 		struct block_device *bdev, struct writeback_control *wbc)
816 {
817 	struct inode *inode = mapping->host;
818 	pgoff_t start_index, end_index;
819 	pgoff_t indices[PAGEVEC_SIZE];
820 	struct dax_device *dax_dev;
821 	struct pagevec pvec;
822 	bool done = false;
823 	int i, ret = 0;
824 
825 	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
826 		return -EIO;
827 
828 	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
829 		return 0;
830 
831 	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
832 	if (!dax_dev)
833 		return -EIO;
834 
835 	start_index = wbc->range_start >> PAGE_SHIFT;
836 	end_index = wbc->range_end >> PAGE_SHIFT;
837 
838 	trace_dax_writeback_range(inode, start_index, end_index);
839 
840 	tag_pages_for_writeback(mapping, start_index, end_index);
841 
842 	pagevec_init(&pvec, 0);
843 	while (!done) {
844 		pvec.nr = find_get_entries_tag(mapping, start_index,
845 				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
846 				pvec.pages, indices);
847 
848 		if (pvec.nr == 0)
849 			break;
850 
851 		for (i = 0; i < pvec.nr; i++) {
852 			if (indices[i] > end_index) {
853 				done = true;
854 				break;
855 			}
856 
857 			ret = dax_writeback_one(bdev, dax_dev, mapping,
858 					indices[i], pvec.pages[i]);
859 			if (ret < 0)
860 				goto out;
861 		}
862 	}
863 out:
864 	put_dax(dax_dev);
865 	trace_dax_writeback_range_done(inode, start_index, end_index);
866 	return (ret < 0 ? ret : 0);
867 }
868 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
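/*
 * Usage sketch: filesystems typically call this from their ->writepages
 * method when the mapping is in DAX mode.  A minimal, hypothetical wrapper
 * (example_find_bdev() stands in for however the filesystem locates the
 * backing block device) might look like:
 *
 *	static int example_dax_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				example_find_bdev(mapping->host), wbc);
 *	}
 */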
869 
870 static int dax_insert_mapping(struct address_space *mapping,
871 		struct block_device *bdev, struct dax_device *dax_dev,
872 		sector_t sector, size_t size, void **entryp,
873 		struct vm_area_struct *vma, struct vm_fault *vmf)
874 {
875 	unsigned long vaddr = vmf->address;
876 	void *entry = *entryp;
877 	void *ret, *kaddr;
878 	pgoff_t pgoff;
879 	int id, rc;
880 	pfn_t pfn;
881 
882 	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
883 	if (rc)
884 		return rc;
885 
886 	id = dax_read_lock();
887 	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
888 	if (rc < 0) {
889 		dax_read_unlock(id);
890 		return rc;
891 	}
892 	dax_read_unlock(id);
893 
894 	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
895 	if (IS_ERR(ret))
896 		return PTR_ERR(ret);
897 	*entryp = ret;
898 
899 	trace_dax_insert_mapping(mapping->host, vmf, ret);
900 	return vm_insert_mixed(vma, vaddr, pfn);
901 }
902 
903 /**
904  * dax_pfn_mkwrite - handle first write to DAX page
905  * @vmf: The description of the fault
906  */
907 int dax_pfn_mkwrite(struct vm_fault *vmf)
908 {
909 	struct file *file = vmf->vma->vm_file;
910 	struct address_space *mapping = file->f_mapping;
911 	struct inode *inode = mapping->host;
912 	void *entry, **slot;
913 	pgoff_t index = vmf->pgoff;
914 
915 	spin_lock_irq(&mapping->tree_lock);
916 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
917 	if (!entry || !radix_tree_exceptional_entry(entry)) {
918 		if (entry)
919 			put_unlocked_mapping_entry(mapping, index, entry);
920 		spin_unlock_irq(&mapping->tree_lock);
921 		trace_dax_pfn_mkwrite_no_entry(inode, vmf, VM_FAULT_NOPAGE);
922 		return VM_FAULT_NOPAGE;
923 	}
924 	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
925 	entry = lock_slot(mapping, slot);
926 	spin_unlock_irq(&mapping->tree_lock);
927 	/*
928 	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
929 	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
930 	 * the fault in either case.
931 	 */
932 	finish_mkwrite_fault(vmf);
933 	put_locked_mapping_entry(mapping, index, entry);
934 	trace_dax_pfn_mkwrite(inode, vmf, VM_FAULT_NOPAGE);
935 	return VM_FAULT_NOPAGE;
936 }
937 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
938 
939 static bool dax_range_is_aligned(struct block_device *bdev,
940 				 unsigned int offset, unsigned int length)
941 {
942 	unsigned short sector_size = bdev_logical_block_size(bdev);
943 
944 	if (!IS_ALIGNED(offset, sector_size))
945 		return false;
946 	if (!IS_ALIGNED(length, sector_size))
947 		return false;
948 
949 	return true;
950 }
951 
952 int __dax_zero_page_range(struct block_device *bdev,
953 		struct dax_device *dax_dev, sector_t sector,
954 		unsigned int offset, unsigned int size)
955 {
956 	if (dax_range_is_aligned(bdev, offset, size)) {
957 		sector_t start_sector = sector + (offset >> 9);
958 
959 		return blkdev_issue_zeroout(bdev, start_sector,
960 				size >> 9, GFP_NOFS, 0);
961 	} else {
962 		pgoff_t pgoff;
963 		long rc, id;
964 		void *kaddr;
965 		pfn_t pfn;
966 
967 		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
968 		if (rc)
969 			return rc;
970 
971 		id = dax_read_lock();
972 		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
973 				&pfn);
974 		if (rc < 0) {
975 			dax_read_unlock(id);
976 			return rc;
977 		}
978 		clear_pmem(kaddr + offset, size);
979 		dax_read_unlock(id);
980 	}
981 	return 0;
982 }
983 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
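/*
 * Usage sketch: filesystems do not normally call this directly.  They call
 * iomap_zero_range() or iomap_truncate_page() (fs/iomap.c), which route DAX
 * inodes to this helper.  For example, zeroing the partial tail block on a
 * truncate might look like (example_iomap_ops stands in for the
 * filesystem's own iomap_ops):
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&example_iomap_ops);
 */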
984 
985 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
986 {
987 	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
988 }
989 
990 static loff_t
991 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
992 		struct iomap *iomap)
993 {
994 	struct block_device *bdev = iomap->bdev;
995 	struct dax_device *dax_dev = iomap->dax_dev;
996 	struct iov_iter *iter = data;
997 	loff_t end = pos + length, done = 0;
998 	ssize_t ret = 0;
999 	int id;
1000 
1001 	if (iov_iter_rw(iter) == READ) {
1002 		end = min(end, i_size_read(inode));
1003 		if (pos >= end)
1004 			return 0;
1005 
1006 		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1007 			return iov_iter_zero(min(length, end - pos), iter);
1008 	}
1009 
1010 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1011 		return -EIO;
1012 
1013 	/*
1014 	 * A write can allocate a block for an area which has a hole page mapped
1015 	 * into the page tables. We have to tear down these mappings so that data
1016 	 * written by write(2) is visible in mmap.
1017 	 */
1018 	if (iomap->flags & IOMAP_F_NEW) {
1019 		invalidate_inode_pages2_range(inode->i_mapping,
1020 					      pos >> PAGE_SHIFT,
1021 					      (end - 1) >> PAGE_SHIFT);
1022 	}
1023 
1024 	id = dax_read_lock();
1025 	while (pos < end) {
1026 		unsigned offset = pos & (PAGE_SIZE - 1);
1027 		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1028 		const sector_t sector = dax_iomap_sector(iomap, pos);
1029 		ssize_t map_len;
1030 		pgoff_t pgoff;
1031 		void *kaddr;
1032 		pfn_t pfn;
1033 
1034 		if (fatal_signal_pending(current)) {
1035 			ret = -EINTR;
1036 			break;
1037 		}
1038 
1039 		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1040 		if (ret)
1041 			break;
1042 
1043 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1044 				&kaddr, &pfn);
1045 		if (map_len < 0) {
1046 			ret = map_len;
1047 			break;
1048 		}
1049 
1050 		map_len = PFN_PHYS(map_len);
1051 		kaddr += offset;
1052 		map_len -= offset;
1053 		if (map_len > end - pos)
1054 			map_len = end - pos;
1055 
1056 		if (iov_iter_rw(iter) == WRITE)
1057 			map_len = copy_from_iter_pmem(kaddr, map_len, iter);
1058 		else
1059 			map_len = copy_to_iter(kaddr, map_len, iter);
1060 		if (map_len <= 0) {
1061 			ret = map_len ? map_len : -EFAULT;
1062 			break;
1063 		}
1064 
1065 		pos += map_len;
1066 		length -= map_len;
1067 		done += map_len;
1068 	}
1069 	dax_read_unlock(id);
1070 
1071 	return done ? done : ret;
1072 }
1073 
1074 /**
1075  * dax_iomap_rw - Perform I/O to a DAX file
1076  * @iocb:	The control block for this I/O
1077  * @iter:	The addresses to do I/O from or to
1078  * @ops:	iomap ops passed from the file system
1079  *
1080  * This function performs read and write operations to directly mapped
1081  * persistent memory.  The caller needs to take care of read/write exclusion
1082  * and evicting any page cache pages in the region under I/O.
1083  */
1084 ssize_t
1085 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1086 		const struct iomap_ops *ops)
1087 {
1088 	struct address_space *mapping = iocb->ki_filp->f_mapping;
1089 	struct inode *inode = mapping->host;
1090 	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1091 	unsigned flags = 0;
1092 
1093 	if (iov_iter_rw(iter) == WRITE) {
1094 		lockdep_assert_held_exclusive(&inode->i_rwsem);
1095 		flags |= IOMAP_WRITE;
1096 	} else {
1097 		lockdep_assert_held(&inode->i_rwsem);
1098 	}
1099 
1100 	while (iov_iter_count(iter)) {
1101 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1102 				iter, dax_iomap_actor);
1103 		if (ret <= 0)
1104 			break;
1105 		pos += ret;
1106 		done += ret;
1107 	}
1108 
1109 	iocb->ki_pos += done;
1110 	return done ? done : ret;
1111 }
1112 EXPORT_SYMBOL_GPL(dax_iomap_rw);
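/*
 * Usage sketch: a filesystem's ->read_iter for a DAX inode can simply
 * forward to dax_iomap_rw() under the shared inode lock, which satisfies
 * the read/write exclusion noted above.  A minimal, hypothetical example
 * (example_iomap_ops stands in for the filesystem's own iomap_ops):
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *			struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 */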
1113 
1114 static int dax_fault_return(int error)
1115 {
1116 	if (error == 0)
1117 		return VM_FAULT_NOPAGE;
1118 	if (error == -ENOMEM)
1119 		return VM_FAULT_OOM;
1120 	return VM_FAULT_SIGBUS;
1121 }
1122 
1123 static int dax_iomap_pte_fault(struct vm_fault *vmf,
1124 			       const struct iomap_ops *ops)
1125 {
1126 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1127 	struct inode *inode = mapping->host;
1128 	unsigned long vaddr = vmf->address;
1129 	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1130 	sector_t sector;
1131 	struct iomap iomap = { 0 };
1132 	unsigned flags = IOMAP_FAULT;
1133 	int error, major = 0;
1134 	int vmf_ret = 0;
1135 	void *entry;
1136 
1137 	trace_dax_pte_fault(inode, vmf, vmf_ret);
1138 	/*
1139 	 * Check whether the offset is beyond the end of the file. The caller is supposed
1140 	 * to hold locks serializing us with truncate / punch hole so this is
1141 	 * a reliable test.
1142 	 */
1143 	if (pos >= i_size_read(inode)) {
1144 		vmf_ret = VM_FAULT_SIGBUS;
1145 		goto out;
1146 	}
1147 
1148 	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1149 		flags |= IOMAP_WRITE;
1150 
1151 	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1152 	if (IS_ERR(entry)) {
1153 		vmf_ret = dax_fault_return(PTR_ERR(entry));
1154 		goto out;
1155 	}
1156 
1157 	/*
1158 	 * Note that we don't bother to use iomap_apply here: DAX requires
1159 	 * the file system block size to be equal to the page size, which means
1160 	 * that we never have to deal with more than a single extent here.
1161 	 */
1162 	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1163 	if (error) {
1164 		vmf_ret = dax_fault_return(error);
1165 		goto unlock_entry;
1166 	}
1167 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1168 		error = -EIO;	/* fs corruption? */
1169 		goto error_finish_iomap;
1170 	}
1171 
1172 	sector = dax_iomap_sector(&iomap, pos);
1173 
1174 	if (vmf->cow_page) {
1175 		switch (iomap.type) {
1176 		case IOMAP_HOLE:
1177 		case IOMAP_UNWRITTEN:
1178 			clear_user_highpage(vmf->cow_page, vaddr);
1179 			break;
1180 		case IOMAP_MAPPED:
1181 			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1182 					sector, PAGE_SIZE, vmf->cow_page, vaddr);
1183 			break;
1184 		default:
1185 			WARN_ON_ONCE(1);
1186 			error = -EIO;
1187 			break;
1188 		}
1189 
1190 		if (error)
1191 			goto error_finish_iomap;
1192 
1193 		__SetPageUptodate(vmf->cow_page);
1194 		vmf_ret = finish_fault(vmf);
1195 		if (!vmf_ret)
1196 			vmf_ret = VM_FAULT_DONE_COW;
1197 		goto finish_iomap;
1198 	}
1199 
1200 	switch (iomap.type) {
1201 	case IOMAP_MAPPED:
1202 		if (iomap.flags & IOMAP_F_NEW) {
1203 			count_vm_event(PGMAJFAULT);
1204 			mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
1205 			major = VM_FAULT_MAJOR;
1206 		}
1207 		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
1208 				sector, PAGE_SIZE, &entry, vmf->vma, vmf);
1209 		/* -EBUSY is fine, somebody else faulted on the same PTE */
1210 		if (error == -EBUSY)
1211 			error = 0;
1212 		break;
1213 	case IOMAP_UNWRITTEN:
1214 	case IOMAP_HOLE:
1215 		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1216 			vmf_ret = dax_load_hole(mapping, &entry, vmf);
1217 			goto finish_iomap;
1218 		}
1219 		/*FALLTHRU*/
1220 	default:
1221 		WARN_ON_ONCE(1);
1222 		error = -EIO;
1223 		break;
1224 	}
1225 
1226  error_finish_iomap:
1227 	vmf_ret = dax_fault_return(error) | major;
1228  finish_iomap:
1229 	if (ops->iomap_end) {
1230 		int copied = PAGE_SIZE;
1231 
1232 		if (vmf_ret & VM_FAULT_ERROR)
1233 			copied = 0;
1234 		/*
1235 		 * The fault is done by now and there's no way back (another
1236 		 * thread may already be happily using the PTE we have installed).
1237 		 * Just ignore error from ->iomap_end since we cannot do much
1238 		 * with it.
1239 		 */
1240 		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1241 	}
1242  unlock_entry:
1243 	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1244  out:
1245 	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
1246 	return vmf_ret;
1247 }
1248 
1249 #ifdef CONFIG_FS_DAX_PMD
1250 /*
1251  * The 'colour' (i.e. the low bits) within a PMD of a page offset.  This comes up
1252  * more often than one might expect in the below functions.
1253  */
1254 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
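/*
 * Worked example (assuming 4 KiB pages and 2 MiB PMDs): PMD_SIZE >> PAGE_SHIFT
 * is 512, so PG_PMD_COLOUR is 0x1ff.  A pfn can only be mapped by a PMD if its
 * low nine bits are zero, and (pgoff | PG_PMD_COLOUR) gives the last page
 * offset within the PMD that contains pgoff; both checks appear in the
 * functions below.
 */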
1255 
1256 static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
1257 		loff_t pos, void **entryp)
1258 {
1259 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1260 	const sector_t sector = dax_iomap_sector(iomap, pos);
1261 	struct dax_device *dax_dev = iomap->dax_dev;
1262 	struct block_device *bdev = iomap->bdev;
1263 	struct inode *inode = mapping->host;
1264 	const size_t size = PMD_SIZE;
1265 	void *ret = NULL, *kaddr;
1266 	long length = 0;
1267 	pgoff_t pgoff;
1268 	pfn_t pfn;
1269 	int id;
1270 
1271 	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
1272 		goto fallback;
1273 
1274 	id = dax_read_lock();
1275 	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
1276 	if (length < 0)
1277 		goto unlock_fallback;
1278 	length = PFN_PHYS(length);
1279 
1280 	if (length < size)
1281 		goto unlock_fallback;
1282 	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
1283 		goto unlock_fallback;
1284 	if (!pfn_t_devmap(pfn))
1285 		goto unlock_fallback;
1286 	dax_read_unlock(id);
1287 
1288 	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, sector,
1289 			RADIX_DAX_PMD);
1290 	if (IS_ERR(ret))
1291 		goto fallback;
1292 	*entryp = ret;
1293 
1294 	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
1295 	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1296 			pfn, vmf->flags & FAULT_FLAG_WRITE);
1297 
1298 unlock_fallback:
1299 	dax_read_unlock(id);
1300 fallback:
1301 	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
1302 	return VM_FAULT_FALLBACK;
1303 }
1304 
1305 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1306 		void **entryp)
1307 {
1308 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1309 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1310 	struct inode *inode = mapping->host;
1311 	struct page *zero_page;
1312 	void *ret = NULL;
1313 	spinlock_t *ptl;
1314 	pmd_t pmd_entry;
1315 
1316 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1317 
1318 	if (unlikely(!zero_page))
1319 		goto fallback;
1320 
1321 	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
1322 			RADIX_DAX_PMD | RADIX_DAX_HZP);
1323 	if (IS_ERR(ret))
1324 		goto fallback;
1325 	*entryp = ret;
1326 
1327 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1328 	if (!pmd_none(*(vmf->pmd))) {
1329 		spin_unlock(ptl);
1330 		goto fallback;
1331 	}
1332 
1333 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1334 	pmd_entry = pmd_mkhuge(pmd_entry);
1335 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1336 	spin_unlock(ptl);
1337 	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1338 	return VM_FAULT_NOPAGE;
1339 
1340 fallback:
1341 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1342 	return VM_FAULT_FALLBACK;
1343 }
1344 
1345 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1346 			       const struct iomap_ops *ops)
1347 {
1348 	struct vm_area_struct *vma = vmf->vma;
1349 	struct address_space *mapping = vma->vm_file->f_mapping;
1350 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1351 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1352 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1353 	struct inode *inode = mapping->host;
1354 	int result = VM_FAULT_FALLBACK;
1355 	struct iomap iomap = { 0 };
1356 	pgoff_t max_pgoff, pgoff;
1357 	void *entry;
1358 	loff_t pos;
1359 	int error;
1360 
1361 	/*
1362 	 * Check whether the offset is beyond the end of the file. The caller is
1363 	 * supposed to hold locks serializing us with truncate / punch hole so
1364 	 * this is a reliable test.
1365 	 */
1366 	pgoff = linear_page_index(vma, pmd_addr);
1367 	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
1368 
1369 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1370 
1371 	/* Fall back to PTEs if we're going to COW */
1372 	if (write && !(vma->vm_flags & VM_SHARED))
1373 		goto fallback;
1374 
1375 	/* If the PMD would extend outside the VMA */
1376 	if (pmd_addr < vma->vm_start)
1377 		goto fallback;
1378 	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1379 		goto fallback;
1380 
1381 	if (pgoff > max_pgoff) {
1382 		result = VM_FAULT_SIGBUS;
1383 		goto out;
1384 	}
1385 
1386 	/* If the PMD would extend beyond the file size */
1387 	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
1388 		goto fallback;
1389 
1390 	/*
1391 	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
1392 	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
1393 	 * the tree, for instance), it will return -EEXIST and we just fall
1394 	 * back to 4k entries.
1395 	 */
1396 	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1397 	if (IS_ERR(entry))
1398 		goto fallback;
1399 
1400 	/*
1401 	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1402 	 * setting up a mapping, so really we're using iomap_begin() as a way
1403 	 * to look up our filesystem block.
1404 	 */
1405 	pos = (loff_t)pgoff << PAGE_SHIFT;
1406 	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1407 	if (error)
1408 		goto unlock_entry;
1409 
1410 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
1411 		goto finish_iomap;
1412 
1413 	switch (iomap.type) {
1414 	case IOMAP_MAPPED:
1415 		result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
1416 		break;
1417 	case IOMAP_UNWRITTEN:
1418 	case IOMAP_HOLE:
1419 		if (WARN_ON_ONCE(write))
1420 			break;
1421 		result = dax_pmd_load_hole(vmf, &iomap, &entry);
1422 		break;
1423 	default:
1424 		WARN_ON_ONCE(1);
1425 		break;
1426 	}
1427 
1428  finish_iomap:
1429 	if (ops->iomap_end) {
1430 		int copied = PMD_SIZE;
1431 
1432 		if (result == VM_FAULT_FALLBACK)
1433 			copied = 0;
1434 		/*
1435 		 * The fault is done by now and there's no way back (another
1436 		 * thread may already be happily using the PMD we have installed).
1437 		 * Just ignore error from ->iomap_end since we cannot do much
1438 		 * with it.
1439 		 */
1440 		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1441 				&iomap);
1442 	}
1443  unlock_entry:
1444 	put_locked_mapping_entry(mapping, pgoff, entry);
1445  fallback:
1446 	if (result == VM_FAULT_FALLBACK) {
1447 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1448 		count_vm_event(THP_FAULT_FALLBACK);
1449 	}
1450 out:
1451 	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1452 	return result;
1453 }
1454 #else
1455 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1456 			       const struct iomap_ops *ops)
1457 {
1458 	return VM_FAULT_FALLBACK;
1459 }
1460 #endif /* CONFIG_FS_DAX_PMD */
1461 
1462 /**
1463  * dax_iomap_fault - handle a page fault on a DAX file
1464  * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in (PE_SIZE_PTE or PE_SIZE_PMD)
1465  * @ops: iomap ops passed from the file system
1466  *
1467  * When a page fault occurs, filesystems may call this helper in
1468  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1469  * has done all the necessary locking for the page fault to proceed
1470  * successfully.
1471  */
1472 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1473 		    const struct iomap_ops *ops)
1474 {
1475 	switch (pe_size) {
1476 	case PE_SIZE_PTE:
1477 		return dax_iomap_pte_fault(vmf, ops);
1478 	case PE_SIZE_PMD:
1479 		return dax_iomap_pmd_fault(vmf, ops);
1480 	default:
1481 		return VM_FAULT_FALLBACK;
1482 	}
1483 }
1484 EXPORT_SYMBOL_GPL(dax_iomap_fault);
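/*
 * Usage sketch: a filesystem wires this into the vm_operations_struct it
 * installs from ->mmap on DAX files.  A minimal, hypothetical example
 * (example_iomap_ops stands in for the filesystem's own iomap_ops; real
 * filesystems also take their fault-serialisation locks and
 * sb_start_pagefault()/sb_end_pagefault() around these calls):
 *
 *	static int example_dax_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		return dax_iomap_fault(vmf, pe_size, &example_iomap_ops);
 *	}
 *
 *	static int example_dax_fault(struct vm_fault *vmf)
 *	{
 *		return example_dax_huge_fault(vmf, PE_SIZE_PTE);
 *	}
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault		= example_dax_fault,
 *		.huge_fault	= example_dax_huge_fault,
 *		.page_mkwrite	= example_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */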
1485