1 /*
2  * fs/dax.c - Direct Access filesystem code
3  * Copyright (c) 2013-2014 Intel Corporation
4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  */
16 
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/pmem.h>
29 #include <linux/sched.h>
30 #include <linux/sched/signal.h>
31 #include <linux/uio.h>
32 #include <linux/vmstat.h>
33 #include <linux/pfn_t.h>
34 #include <linux/sizes.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/iomap.h>
37 #include "internal.h"
38 
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/fs_dax.h>
41 
42 /* We choose 4096 entries - same as per-zone page wait tables */
43 #define DAX_WAIT_TABLE_BITS 12
44 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
45 
46 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
47 
48 static int __init init_dax_wait_table(void)
49 {
50 	int i;
51 
52 	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
53 		init_waitqueue_head(wait_table + i);
54 	return 0;
55 }
56 fs_initcall(init_dax_wait_table);
57 
58 static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
59 {
60 	struct request_queue *q = bdev->bd_queue;
61 	long rc = -EIO;
62 
63 	dax->addr = ERR_PTR(-EIO);
64 	if (blk_queue_enter(q, true) != 0)
65 		return rc;
66 
67 	rc = bdev_direct_access(bdev, dax);
68 	if (rc < 0) {
69 		dax->addr = ERR_PTR(rc);
70 		blk_queue_exit(q);
71 		return rc;
72 	}
73 	return rc;
74 }
75 
76 static void dax_unmap_atomic(struct block_device *bdev,
77 		const struct blk_dax_ctl *dax)
78 {
79 	if (IS_ERR(dax->addr))
80 		return;
81 	blk_queue_exit(bdev->bd_queue);
82 }
83 
84 static int dax_is_pmd_entry(void *entry)
85 {
86 	return (unsigned long)entry & RADIX_DAX_PMD;
87 }
88 
89 static int dax_is_pte_entry(void *entry)
90 {
91 	return !((unsigned long)entry & RADIX_DAX_PMD);
92 }
93 
94 static int dax_is_zero_entry(void *entry)
95 {
96 	return (unsigned long)entry & RADIX_DAX_HZP;
97 }
98 
99 static int dax_is_empty_entry(void *entry)
100 {
101 	return (unsigned long)entry & RADIX_DAX_EMPTY;
102 }
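/*
 * Rough sketch of the entry encoding (dax_radix_locked_entry(),
 * dax_radix_sector() and the RADIX_DAX_* flags in <linux/dax.h> are the
 * authoritative definitions): the helpers above test flag bits packed
 * into the low bits of an exceptional radix tree entry, with the sector
 * kept in the remaining bits.  For example, an entry built as
 *
 *	entry = dax_radix_locked_entry(sector, RADIX_DAX_PMD);
 *
 * makes dax_is_pmd_entry(entry) nonzero, while dax_is_zero_entry(entry)
 * and dax_is_empty_entry(entry) stay zero - a locked, PMD-sized entry
 * backed by real storage.
 */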
103 
104 struct page *read_dax_sector(struct block_device *bdev, sector_t n)
105 {
106 	struct page *page = alloc_pages(GFP_KERNEL, 0);
107 	struct blk_dax_ctl dax = {
108 		.size = PAGE_SIZE,
109 		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
110 	};
111 	long rc;
112 
113 	if (!page)
114 		return ERR_PTR(-ENOMEM);
115 
116 	rc = dax_map_atomic(bdev, &dax);
117 	if (rc < 0)
118 		return ERR_PTR(rc);
119 	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
120 	dax_unmap_atomic(bdev, &dax);
121 	return page;
122 }
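/*
 * Worked example for the sector rounding in read_dax_sector(), assuming
 * 4k pages and 512-byte sectors: PAGE_SIZE / 512 == 8, so 'n' is rounded
 * down to a multiple of 8 (e.g. n == 13 becomes sector 8), i.e. to the
 * first sector of the page-sized region that contains it.
 */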
123 
124 /*
125  * DAX radix tree locking
126  */
127 struct exceptional_entry_key {
128 	struct address_space *mapping;
129 	pgoff_t entry_start;
130 };
131 
132 struct wait_exceptional_entry_queue {
133 	wait_queue_t wait;
134 	struct exceptional_entry_key key;
135 };
136 
137 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
138 		pgoff_t index, void *entry, struct exceptional_entry_key *key)
139 {
140 	unsigned long hash;
141 
142 	/*
143 	 * If 'entry' is a PMD, align the 'index' that we use for the wait
144 	 * queue to the start of that PMD.  This ensures that all offsets in
145 	 * the range covered by the PMD map to the same bit lock.
146 	 */
147 	if (dax_is_pmd_entry(entry))
148 		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
149 
150 	key->mapping = mapping;
151 	key->entry_start = index;
152 
153 	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
154 	return wait_table + hash;
155 }
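/*
 * Worked example for the PMD alignment above, assuming x86-64 with 4k
 * pages and 2MiB PMDs: PMD_SHIFT - PAGE_SHIFT == 9, so the mask is 511
 * and an index of 0x203 belonging to a PMD entry is rounded down to
 * 0x200.  Every page offset within that 2MiB range therefore hashes to
 * the same wait_table[] entry, which is what lets one lock bit cover the
 * whole PMD.
 */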
156 
157 static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
158 				       int sync, void *keyp)
159 {
160 	struct exceptional_entry_key *key = keyp;
161 	struct wait_exceptional_entry_queue *ewait =
162 		container_of(wait, struct wait_exceptional_entry_queue, wait);
163 
164 	if (key->mapping != ewait->key.mapping ||
165 	    key->entry_start != ewait->key.entry_start)
166 		return 0;
167 	return autoremove_wake_function(wait, mode, sync, NULL);
168 }
169 
170 /*
171  * Check whether the given slot is locked. The function must be called with
172  * mapping->tree_lock held
173  */
174 static inline int slot_locked(struct address_space *mapping, void **slot)
175 {
176 	unsigned long entry = (unsigned long)
177 		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
178 	return entry & RADIX_DAX_ENTRY_LOCK;
179 }
180 
181 /*
182  * Mark the given slot as locked. The function must be called with
183  * mapping->tree_lock held
184  */
185 static inline void *lock_slot(struct address_space *mapping, void **slot)
186 {
187 	unsigned long entry = (unsigned long)
188 		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
189 
190 	entry |= RADIX_DAX_ENTRY_LOCK;
191 	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
192 	return (void *)entry;
193 }
194 
195 /*
196  * Mark the given slot as unlocked. The function must be called with
197  * mapping->tree_lock held
198  */
199 static inline void *unlock_slot(struct address_space *mapping, void **slot)
200 {
201 	unsigned long entry = (unsigned long)
202 		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
203 
204 	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
205 	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
206 	return (void *)entry;
207 }
208 
209 /*
210  * Look up an entry in the radix tree and, if it is a locked exceptional
211  * entry, wait for it to become unlocked before returning it. The caller
212  * must call put_unlocked_mapping_entry() if it decides not to lock the
213  * entry, or put_locked_mapping_entry() once it has locked the entry and
214  * wants to unlock it.
215  *
216  * The function must be called with mapping->tree_lock held.
217  */
218 static void *get_unlocked_mapping_entry(struct address_space *mapping,
219 					pgoff_t index, void ***slotp)
220 {
221 	void *entry, **slot;
222 	struct wait_exceptional_entry_queue ewait;
223 	wait_queue_head_t *wq;
224 
225 	init_wait(&ewait.wait);
226 	ewait.wait.func = wake_exceptional_entry_func;
227 
228 	for (;;) {
229 		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
230 					  &slot);
231 		if (!entry || !radix_tree_exceptional_entry(entry) ||
232 		    !slot_locked(mapping, slot)) {
233 			if (slotp)
234 				*slotp = slot;
235 			return entry;
236 		}
237 
238 		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
239 		prepare_to_wait_exclusive(wq, &ewait.wait,
240 					  TASK_UNINTERRUPTIBLE);
241 		spin_unlock_irq(&mapping->tree_lock);
242 		schedule();
243 		finish_wait(wq, &ewait.wait);
244 		spin_lock_irq(&mapping->tree_lock);
245 	}
246 }
247 
248 static void dax_unlock_mapping_entry(struct address_space *mapping,
249 				     pgoff_t index)
250 {
251 	void *entry, **slot;
252 
253 	spin_lock_irq(&mapping->tree_lock);
254 	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
255 	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
256 			 !slot_locked(mapping, slot))) {
257 		spin_unlock_irq(&mapping->tree_lock);
258 		return;
259 	}
260 	unlock_slot(mapping, slot);
261 	spin_unlock_irq(&mapping->tree_lock);
262 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
263 }
264 
265 static void put_locked_mapping_entry(struct address_space *mapping,
266 				     pgoff_t index, void *entry)
267 {
268 	if (!radix_tree_exceptional_entry(entry)) {
269 		unlock_page(entry);
270 		put_page(entry);
271 	} else {
272 		dax_unlock_mapping_entry(mapping, index);
273 	}
274 }
275 
276 /*
277  * Called when we are done with radix tree entry we looked up via
278  * get_unlocked_mapping_entry() and which we didn't lock in the end.
279  */
280 static void put_unlocked_mapping_entry(struct address_space *mapping,
281 				       pgoff_t index, void *entry)
282 {
283 	if (!radix_tree_exceptional_entry(entry))
284 		return;
285 
286 	/* We have to wake up next waiter for the radix tree entry lock */
287 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
288 }
289 
290 /*
291  * Find the radix tree entry at the given index. If it points to a page,
292  * return with the page locked. If it points to an exceptional entry,
293  * return with the radix tree entry locked. If there is no entry at the
294  * index, create an empty exceptional entry for it and return it locked.
295  *
296  * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
297  * either return that locked entry or will return an error.  This error will
298  * happen if there are any 4k entries (either zero pages or DAX entries)
299  * within the 2MiB range that we are requesting.
300  *
301  * We always favor 4k entries over 2MiB entries. There isn't a flow where we
302  * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
303  * insertion will fail if it finds any 4k entries already in the tree, and a
304  * 4k insertion will cause an existing 2MiB entry to be unmapped and
305  * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
306  * well as 2MiB empty entries.
307  *
308  * The exception to this downgrade path is for 2MiB DAX PMD entries that have
309  * real storage backing them.  We will leave these real 2MiB DAX entries in
310  * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
311  *
312  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
313  * persistent memory the benefit is doubtful. We can add that later if we can
314  * show it helps.
315  */
316 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
317 		unsigned long size_flag)
318 {
319 	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
320 	void *entry, **slot;
321 
322 restart:
323 	spin_lock_irq(&mapping->tree_lock);
324 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
325 
326 	if (entry) {
327 		if (size_flag & RADIX_DAX_PMD) {
328 			if (!radix_tree_exceptional_entry(entry) ||
329 			    dax_is_pte_entry(entry)) {
330 				put_unlocked_mapping_entry(mapping, index,
331 						entry);
332 				entry = ERR_PTR(-EEXIST);
333 				goto out_unlock;
334 			}
335 		} else { /* trying to grab a PTE entry */
336 			if (radix_tree_exceptional_entry(entry) &&
337 			    dax_is_pmd_entry(entry) &&
338 			    (dax_is_zero_entry(entry) ||
339 			     dax_is_empty_entry(entry))) {
340 				pmd_downgrade = true;
341 			}
342 		}
343 	}
344 
345 	/* No entry for given index? Make sure radix tree is big enough. */
346 	if (!entry || pmd_downgrade) {
347 		int err;
348 
349 		if (pmd_downgrade) {
350 			/*
351 			 * Make sure 'entry' remains valid while we drop
352 			 * mapping->tree_lock.
353 			 */
354 			entry = lock_slot(mapping, slot);
355 		}
356 
357 		spin_unlock_irq(&mapping->tree_lock);
358 		/*
359 		 * Besides huge zero pages, the only other things that get
360 		 * downgraded are empty entries, which don't need to be
361 		 * unmapped.
362 		 */
363 		if (pmd_downgrade && dax_is_zero_entry(entry))
364 			unmap_mapping_range(mapping,
365 				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
366 
367 		err = radix_tree_preload(
368 				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
369 		if (err) {
370 			if (pmd_downgrade)
371 				put_locked_mapping_entry(mapping, index, entry);
372 			return ERR_PTR(err);
373 		}
374 		spin_lock_irq(&mapping->tree_lock);
375 
376 		if (pmd_downgrade) {
377 			radix_tree_delete(&mapping->page_tree, index);
378 			mapping->nrexceptional--;
379 			dax_wake_mapping_entry_waiter(mapping, index, entry,
380 					true);
381 		}
382 
383 		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
384 
385 		err = __radix_tree_insert(&mapping->page_tree, index,
386 				dax_radix_order(entry), entry);
387 		radix_tree_preload_end();
388 		if (err) {
389 			spin_unlock_irq(&mapping->tree_lock);
390 			/*
391 			 * Someone already created the entry?  This is a
392 			 * normal failure when inserting PMDs in a range
393 			 * that already contains PTEs.  In that case we want
394 			 * to return -EEXIST immediately.
395 			 */
396 			if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
397 				goto restart;
398 			/*
399 			 * Our insertion of a DAX PMD entry failed, most
400 			 * likely because it collided with a PTE sized entry
401 			 * at a different index in the PMD range.  We haven't
402 			 * inserted anything into the radix tree and have no
403 			 * waiters to wake.
404 			 */
405 			return ERR_PTR(err);
406 		}
407 		/* Good, we have inserted empty locked entry into the tree. */
408 		mapping->nrexceptional++;
409 		spin_unlock_irq(&mapping->tree_lock);
410 		return entry;
411 	}
412 	/* Normal page in radix tree? */
413 	if (!radix_tree_exceptional_entry(entry)) {
414 		struct page *page = entry;
415 
416 		get_page(page);
417 		spin_unlock_irq(&mapping->tree_lock);
418 		lock_page(page);
419 		/* Page got truncated? Retry... */
420 		if (unlikely(page->mapping != mapping)) {
421 			unlock_page(page);
422 			put_page(page);
423 			goto restart;
424 		}
425 		return page;
426 	}
427 	entry = lock_slot(mapping, slot);
428  out_unlock:
429 	spin_unlock_irq(&mapping->tree_lock);
430 	return entry;
431 }
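/*
 * Usage sketch (mirrors the fault handlers later in this file): a caller
 * grabs and later releases the locked entry roughly like so:
 *
 *	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
 *	if (IS_ERR(entry))
 *		return dax_fault_return(PTR_ERR(entry));
 *	... install the mapping, possibly replacing 'entry' via
 *	    dax_insert_mapping_entry() ...
 *	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 *
 * Passing RADIX_DAX_PMD instead of 0 asks for a 2MiB entry and may fail
 * with -EEXIST as described above, in which case the PMD fault path just
 * falls back to PTEs.
 */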
432 
433 /*
434  * We do not necessarily hold the mapping->tree_lock when we call this
435  * function so it is possible that 'entry' is no longer a valid item in the
436  * radix tree.  This is okay because all we really need to do is to find the
437  * correct waitqueue where tasks might be waiting for that old 'entry' and
438  * wake them.
439  */
440 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
441 		pgoff_t index, void *entry, bool wake_all)
442 {
443 	struct exceptional_entry_key key;
444 	wait_queue_head_t *wq;
445 
446 	wq = dax_entry_waitqueue(mapping, index, entry, &key);
447 
448 	/*
449 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
450 	 * under mapping->tree_lock, ditto for entry handling in our callers.
451 	 * So at this point all tasks that could have seen our entry locked
452 	 * must be in the waitqueue and the following check will see them.
453 	 */
454 	if (waitqueue_active(wq))
455 		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
456 }
457 
458 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
459 					  pgoff_t index, bool trunc)
460 {
461 	int ret = 0;
462 	void *entry;
463 	struct radix_tree_root *page_tree = &mapping->page_tree;
464 
465 	spin_lock_irq(&mapping->tree_lock);
466 	entry = get_unlocked_mapping_entry(mapping, index, NULL);
467 	if (!entry || !radix_tree_exceptional_entry(entry))
468 		goto out;
469 	if (!trunc &&
470 	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
471 	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
472 		goto out;
473 	radix_tree_delete(page_tree, index);
474 	mapping->nrexceptional--;
475 	ret = 1;
476 out:
477 	put_unlocked_mapping_entry(mapping, index, entry);
478 	spin_unlock_irq(&mapping->tree_lock);
479 	return ret;
480 }
481 /*
482  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
483  * entry to get unlocked before deleting it.
484  */
485 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
486 {
487 	int ret = __dax_invalidate_mapping_entry(mapping, index, true);
488 
489 	/*
490  * This gets called from the truncate / punch_hole path. As such, the
491  * caller must hold locks protecting against concurrent modifications of
492  * the radix tree (usually fs-private i_mmap_sem for writing). Since the
493  * caller has seen an exceptional entry for this index, we had better
494  * find it at that index as well...
495 	 */
496 	WARN_ON_ONCE(!ret);
497 	return ret;
498 }
499 
500 /*
501  * Invalidate exceptional DAX entry if easily possible. This handles DAX
502  * entries for invalidate_inode_pages() so we evict the entry only if we can
503  * do so without blocking.
504  */
505 int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
506 {
507 	int ret = 0;
508 	void *entry, **slot;
509 	struct radix_tree_root *page_tree = &mapping->page_tree;
510 
511 	spin_lock_irq(&mapping->tree_lock);
512 	entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
513 	if (!entry || !radix_tree_exceptional_entry(entry) ||
514 	    slot_locked(mapping, slot))
515 		goto out;
516 	if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
517 	    radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
518 		goto out;
519 	radix_tree_delete(page_tree, index);
520 	mapping->nrexceptional--;
521 	ret = 1;
522 out:
523 	spin_unlock_irq(&mapping->tree_lock);
524 	if (ret)
525 		dax_wake_mapping_entry_waiter(mapping, index, entry, true);
526 	return ret;
527 }
528 
529 /*
530  * Invalidate exceptional DAX entry if it is clean.
531  */
532 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
533 				      pgoff_t index)
534 {
535 	return __dax_invalidate_mapping_entry(mapping, index, false);
536 }
537 
538 /*
539  * The user has performed a load from a hole in the file.  Allocating
540  * a new page in the file would cause excessive storage usage for
541  * workloads with sparse files.  We allocate a page cache page instead.
542  * We'll kick it out of the page cache if it's ever written to,
543  * otherwise it will simply fall out of the page cache under memory
544  * pressure without ever having been dirtied.
545  */
546 static int dax_load_hole(struct address_space *mapping, void **entry,
547 			 struct vm_fault *vmf)
548 {
549 	struct page *page;
550 	int ret;
551 
552 	/* Hole page already exists? Return it...  */
553 	if (!radix_tree_exceptional_entry(*entry)) {
554 		page = *entry;
555 		goto out;
556 	}
557 
558 	/* This will replace locked radix tree entry with a hole page */
559 	page = find_or_create_page(mapping, vmf->pgoff,
560 				   vmf->gfp_mask | __GFP_ZERO);
561 	if (!page)
562 		return VM_FAULT_OOM;
563  out:
564 	vmf->page = page;
565 	ret = finish_fault(vmf);
566 	vmf->page = NULL;
567 	*entry = page;
568 	if (!ret) {
569 		/* Grab reference for PTE that is now referencing the page */
570 		get_page(page);
571 		return VM_FAULT_NOPAGE;
572 	}
573 	return ret;
574 }
575 
576 static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
577 		struct page *to, unsigned long vaddr)
578 {
579 	struct blk_dax_ctl dax = {
580 		.sector = sector,
581 		.size = size,
582 	};
583 	void *vto;
584 
585 	if (dax_map_atomic(bdev, &dax) < 0)
586 		return PTR_ERR(dax.addr);
587 	vto = kmap_atomic(to);
588 	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
589 	kunmap_atomic(vto);
590 	dax_unmap_atomic(bdev, &dax);
591 	return 0;
592 }
593 
594 /*
595  * By this point grab_mapping_entry() has ensured that we have a locked entry
596  * of the appropriate size so we don't have to worry about downgrading PMDs to
597  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
598  * already in the tree, we will skip the insertion and just dirty the PMD as
599  * appropriate.
600  */
601 static void *dax_insert_mapping_entry(struct address_space *mapping,
602 				      struct vm_fault *vmf,
603 				      void *entry, sector_t sector,
604 				      unsigned long flags)
605 {
606 	struct radix_tree_root *page_tree = &mapping->page_tree;
607 	int error = 0;
608 	bool hole_fill = false;
609 	void *new_entry;
610 	pgoff_t index = vmf->pgoff;
611 
612 	if (vmf->flags & FAULT_FLAG_WRITE)
613 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
614 
615 	/* Replacing hole page with block mapping? */
616 	if (!radix_tree_exceptional_entry(entry)) {
617 		hole_fill = true;
618 		/*
619 		 * Unmap the page now before we remove it from page cache below.
620 		 * The page is locked so it cannot be faulted in again.
621 		 */
622 		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
623 				    PAGE_SIZE, 0);
624 		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
625 		if (error)
626 			return ERR_PTR(error);
627 	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
628 		/* replacing huge zero page with PMD block mapping */
629 		unmap_mapping_range(mapping,
630 			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
631 	}
632 
633 	spin_lock_irq(&mapping->tree_lock);
634 	new_entry = dax_radix_locked_entry(sector, flags);
635 
636 	if (hole_fill) {
637 		__delete_from_page_cache(entry, NULL);
638 		/* Drop pagecache reference */
639 		put_page(entry);
640 		error = __radix_tree_insert(page_tree, index,
641 				dax_radix_order(new_entry), new_entry);
642 		if (error) {
643 			new_entry = ERR_PTR(error);
644 			goto unlock;
645 		}
646 		mapping->nrexceptional++;
647 	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
648 		/*
649 		 * Only swap our new entry into the radix tree if the current
650 		 * entry is a zero page or an empty entry.  If a normal PTE or
651 		 * PMD entry is already in the tree, we leave it alone.  This
652 		 * means that if we are trying to insert a PTE and the
653 		 * existing entry is a PMD, we will just leave the PMD in the
654 		 * tree and dirty it if necessary.
655 		 */
656 		struct radix_tree_node *node;
657 		void **slot;
658 		void *ret;
659 
660 		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
661 		WARN_ON_ONCE(ret != entry);
662 		__radix_tree_replace(page_tree, node, slot,
663 				     new_entry, NULL, NULL);
664 	}
665 	if (vmf->flags & FAULT_FLAG_WRITE)
666 		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
667  unlock:
668 	spin_unlock_irq(&mapping->tree_lock);
669 	if (hole_fill) {
670 		radix_tree_preload_end();
671 		/*
672 		 * We don't need the hole page anymore, it has been replaced with
673 		 * a locked radix tree entry now.
674 		 */
675 		if (mapping->a_ops->freepage)
676 			mapping->a_ops->freepage(entry);
677 		unlock_page(entry);
678 		put_page(entry);
679 	}
680 	return new_entry;
681 }
682 
683 static inline unsigned long
684 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
685 {
686 	unsigned long address;
687 
688 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
689 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
690 	return address;
691 }
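/*
 * Quick example of the calculation above: for a VMA with
 * vm_start == 0x7f0000000000 and vm_pgoff == 0x10, a file page offset of
 * 0x13 maps to vm_start + (0x13 - 0x10) * PAGE_SIZE, i.e. three pages
 * into the mapping.
 */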
692 
693 /* Walk all mappings of a given index of a file and writeprotect them */
694 static void dax_mapping_entry_mkclean(struct address_space *mapping,
695 				      pgoff_t index, unsigned long pfn)
696 {
697 	struct vm_area_struct *vma;
698 	pte_t pte, *ptep = NULL;
699 	pmd_t *pmdp = NULL;
700 	spinlock_t *ptl;
701 	bool changed;
702 
703 	i_mmap_lock_read(mapping);
704 	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
705 		unsigned long address;
706 
707 		cond_resched();
708 
709 		if (!(vma->vm_flags & VM_SHARED))
710 			continue;
711 
712 		address = pgoff_address(index, vma);
713 		changed = false;
714 		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
715 			continue;
716 
717 		if (pmdp) {
718 #ifdef CONFIG_FS_DAX_PMD
719 			pmd_t pmd;
720 
721 			if (pfn != pmd_pfn(*pmdp))
722 				goto unlock_pmd;
723 			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
724 				goto unlock_pmd;
725 
726 			flush_cache_page(vma, address, pfn);
727 			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
728 			pmd = pmd_wrprotect(pmd);
729 			pmd = pmd_mkclean(pmd);
730 			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
731 			changed = true;
732 unlock_pmd:
733 			spin_unlock(ptl);
734 #endif
735 		} else {
736 			if (pfn != pte_pfn(*ptep))
737 				goto unlock_pte;
738 			if (!pte_dirty(*ptep) && !pte_write(*ptep))
739 				goto unlock_pte;
740 
741 			flush_cache_page(vma, address, pfn);
742 			pte = ptep_clear_flush(vma, address, ptep);
743 			pte = pte_wrprotect(pte);
744 			pte = pte_mkclean(pte);
745 			set_pte_at(vma->vm_mm, address, ptep, pte);
746 			changed = true;
747 unlock_pte:
748 			pte_unmap_unlock(ptep, ptl);
749 		}
750 
751 		if (changed)
752 			mmu_notifier_invalidate_page(vma->vm_mm, address);
753 	}
754 	i_mmap_unlock_read(mapping);
755 }
756 
757 static int dax_writeback_one(struct block_device *bdev,
758 		struct address_space *mapping, pgoff_t index, void *entry)
759 {
760 	struct radix_tree_root *page_tree = &mapping->page_tree;
761 	struct blk_dax_ctl dax;
762 	void *entry2, **slot;
763 	int ret = 0;
764 
765 	/*
766 	 * A page got tagged dirty in DAX mapping? Something is seriously
767 	 * wrong.
768 	 */
769 	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
770 		return -EIO;
771 
772 	spin_lock_irq(&mapping->tree_lock);
773 	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
774 	/* Entry got punched out / reallocated? */
775 	if (!entry2 || !radix_tree_exceptional_entry(entry2))
776 		goto put_unlocked;
777 	/*
778 	 * Entry got reallocated elsewhere? No need to write it back. We have
779 	 * to compare sectors as we must not bail out due to a difference in
780 	 * the lock bit or entry type.
781 	 */
782 	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
783 		goto put_unlocked;
784 	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
785 				dax_is_zero_entry(entry))) {
786 		ret = -EIO;
787 		goto put_unlocked;
788 	}
789 
790 	/* Another fsync thread may have already written back this entry */
791 	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
792 		goto put_unlocked;
793 	/* Lock the entry to serialize with page faults */
794 	entry = lock_slot(mapping, slot);
795 	/*
796 	 * We can clear the tag now but we have to be careful so that concurrent
797 	 * dax_writeback_one() calls for the same index cannot finish before we
798 	 * actually flush the caches. This is achieved as the calls will look
799 	 * at the entry only under tree_lock and once they do that they will
800 	 * see the entry locked and wait for it to unlock.
801 	 */
802 	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
803 	spin_unlock_irq(&mapping->tree_lock);
804 
805 	/*
806 	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
807 	 * in the middle of a PMD, the 'index' we are given will be aligned to
808 	 * the start index of the PMD, as will the sector we pull from
809 	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
810 	 * worry about partial PMD writebacks.
811 	 */
812 	dax.sector = dax_radix_sector(entry);
813 	dax.size = PAGE_SIZE << dax_radix_order(entry);
814 
815 	/*
816 	 * We cannot hold tree_lock while calling dax_map_atomic() because it
817 	 * eventually calls cond_resched().
818 	 */
819 	ret = dax_map_atomic(bdev, &dax);
820 	if (ret < 0) {
821 		put_locked_mapping_entry(mapping, index, entry);
822 		return ret;
823 	}
824 
825 	if (WARN_ON_ONCE(ret < dax.size)) {
826 		ret = -EIO;
827 		goto unmap;
828 	}
829 
830 	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
831 	wb_cache_pmem(dax.addr, dax.size);
832 	/*
833 	 * After we have flushed the cache, we can clear the dirty tag. There
834 	 * cannot be new dirty data in the pfn after the flush has completed as
835 	 * the pfn mappings are writeprotected and fault waits for mapping
836 	 * entry lock.
837 	 */
838 	spin_lock_irq(&mapping->tree_lock);
839 	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
840 	spin_unlock_irq(&mapping->tree_lock);
841  unmap:
842 	dax_unmap_atomic(bdev, &dax);
843 	put_locked_mapping_entry(mapping, index, entry);
844 	return ret;
845 
846  put_unlocked:
847 	put_unlocked_mapping_entry(mapping, index, entry2);
848 	spin_unlock_irq(&mapping->tree_lock);
849 	return ret;
850 }
851 
852 /*
853  * Flush the mapping to the persistent domain within the byte range of [start,
854  * end]. This is required by data integrity operations to ensure file data is
855  * on persistent storage prior to completion of the operation.
856  */
857 int dax_writeback_mapping_range(struct address_space *mapping,
858 		struct block_device *bdev, struct writeback_control *wbc)
859 {
860 	struct inode *inode = mapping->host;
861 	pgoff_t start_index, end_index;
862 	pgoff_t indices[PAGEVEC_SIZE];
863 	struct pagevec pvec;
864 	bool done = false;
865 	int i, ret = 0;
866 
867 	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
868 		return -EIO;
869 
870 	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
871 		return 0;
872 
873 	start_index = wbc->range_start >> PAGE_SHIFT;
874 	end_index = wbc->range_end >> PAGE_SHIFT;
875 
876 	tag_pages_for_writeback(mapping, start_index, end_index);
877 
878 	pagevec_init(&pvec, 0);
879 	while (!done) {
880 		pvec.nr = find_get_entries_tag(mapping, start_index,
881 				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
882 				pvec.pages, indices);
883 
884 		if (pvec.nr == 0)
885 			break;
886 
887 		for (i = 0; i < pvec.nr; i++) {
888 			if (indices[i] > end_index) {
889 				done = true;
890 				break;
891 			}
892 
893 			ret = dax_writeback_one(bdev, mapping, indices[i],
894 					pvec.pages[i]);
895 			if (ret < 0)
896 				return ret;
897 		}
898 	}
899 	return 0;
900 }
901 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
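/*
 * Usage sketch: filesystems typically forward DAX mappings here from
 * their ->writepages() implementation, along these lines (hypothetical
 * example, remaining page cache handling omitted):
 *
 *	static int example_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		if (dax_mapping(mapping))
 *			return dax_writeback_mapping_range(mapping,
 *					mapping->host->i_sb->s_bdev, wbc);
 *		...
 *	}
 *
 * which is roughly what ext4 and xfs do for DAX inodes at this point.
 */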
902 
903 static int dax_insert_mapping(struct address_space *mapping,
904 		struct block_device *bdev, sector_t sector, size_t size,
905 		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
906 {
907 	unsigned long vaddr = vmf->address;
908 	struct blk_dax_ctl dax = {
909 		.sector = sector,
910 		.size = size,
911 	};
912 	void *ret;
913 	void *entry = *entryp;
914 
915 	if (dax_map_atomic(bdev, &dax) < 0)
916 		return PTR_ERR(dax.addr);
917 	dax_unmap_atomic(bdev, &dax);
918 
919 	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
920 	if (IS_ERR(ret))
921 		return PTR_ERR(ret);
922 	*entryp = ret;
923 
924 	return vm_insert_mixed(vma, vaddr, dax.pfn);
925 }
926 
927 /**
928  * dax_pfn_mkwrite - handle first write to DAX page
929  * @vmf: The description of the fault
930  */
931 int dax_pfn_mkwrite(struct vm_fault *vmf)
932 {
933 	struct file *file = vmf->vma->vm_file;
934 	struct address_space *mapping = file->f_mapping;
935 	void *entry, **slot;
936 	pgoff_t index = vmf->pgoff;
937 
938 	spin_lock_irq(&mapping->tree_lock);
939 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
940 	if (!entry || !radix_tree_exceptional_entry(entry)) {
941 		if (entry)
942 			put_unlocked_mapping_entry(mapping, index, entry);
943 		spin_unlock_irq(&mapping->tree_lock);
944 		return VM_FAULT_NOPAGE;
945 	}
946 	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
947 	entry = lock_slot(mapping, slot);
948 	spin_unlock_irq(&mapping->tree_lock);
949 	/*
950 	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
951 	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
952 	 * the fault in either case.
953 	 */
954 	finish_mkwrite_fault(vmf);
955 	put_locked_mapping_entry(mapping, index, entry);
956 	return VM_FAULT_NOPAGE;
957 }
958 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
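/*
 * Usage sketch: this is meant to be wired up as the ->pfn_mkwrite handler
 * in a filesystem's vm_operations_struct for DAX mappings, typically
 * wrapped so the fs can first take its own lock against truncate
 * (hypothetical example; "example_mmap_sem" stands for that fs lock):
 *
 *	static int example_dax_pfn_mkwrite(struct vm_fault *vmf)
 *	{
 *		int ret;
 *
 *		down_read(&example_mmap_sem);
 *		ret = dax_pfn_mkwrite(vmf);
 *		up_read(&example_mmap_sem);
 *		return ret;
 *	}
 */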
959 
960 static bool dax_range_is_aligned(struct block_device *bdev,
961 				 unsigned int offset, unsigned int length)
962 {
963 	unsigned short sector_size = bdev_logical_block_size(bdev);
964 
965 	if (!IS_ALIGNED(offset, sector_size))
966 		return false;
967 	if (!IS_ALIGNED(length, sector_size))
968 		return false;
969 
970 	return true;
971 }
972 
973 int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
974 		unsigned int offset, unsigned int length)
975 {
976 	struct blk_dax_ctl dax = {
977 		.sector		= sector,
978 		.size		= PAGE_SIZE,
979 	};
980 
981 	if (dax_range_is_aligned(bdev, offset, length)) {
982 		sector_t start_sector = dax.sector + (offset >> 9);
983 
984 		return blkdev_issue_zeroout(bdev, start_sector,
985 				length >> 9, GFP_NOFS, true);
986 	} else {
987 		if (dax_map_atomic(bdev, &dax) < 0)
988 			return PTR_ERR(dax.addr);
989 		clear_pmem(dax.addr + offset, length);
990 		dax_unmap_atomic(bdev, &dax);
991 	}
992 	return 0;
993 }
994 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
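/*
 * Example of the two paths above, assuming a 512-byte logical block size:
 * zeroing offset 0, length 4096 is sector aligned and goes through
 * blkdev_issue_zeroout(), while zeroing offset 100, length 200 is not,
 * and is done through clear_pmem() on the directly mapped page instead.
 */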
995 
996 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
997 {
998 	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
999 }
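/*
 * Example of the sector calculation above: with iomap->blkno == 1000,
 * iomap->offset == 0 and pos == 0x3210, the page-aligned position is
 * 0x3000 (12288 bytes) which is 24 512-byte sectors, so the returned
 * sector is 1024.
 */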
1000 
1001 static loff_t
1002 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1003 		struct iomap *iomap)
1004 {
1005 	struct iov_iter *iter = data;
1006 	loff_t end = pos + length, done = 0;
1007 	ssize_t ret = 0;
1008 
1009 	if (iov_iter_rw(iter) == READ) {
1010 		end = min(end, i_size_read(inode));
1011 		if (pos >= end)
1012 			return 0;
1013 
1014 		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1015 			return iov_iter_zero(min(length, end - pos), iter);
1016 	}
1017 
1018 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1019 		return -EIO;
1020 
1021 	/*
1022 	 * A write can allocate a block for an area which has a hole page mapped
1023 	 * into the page tables. We have to tear down these mappings so that data
1024 	 * written by write(2) is visible in mmap.
1025 	 */
1026 	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
1027 		invalidate_inode_pages2_range(inode->i_mapping,
1028 					      pos >> PAGE_SHIFT,
1029 					      (end - 1) >> PAGE_SHIFT);
1030 	}
1031 
1032 	while (pos < end) {
1033 		unsigned offset = pos & (PAGE_SIZE - 1);
1034 		struct blk_dax_ctl dax = { 0 };
1035 		ssize_t map_len;
1036 
1037 		if (fatal_signal_pending(current)) {
1038 			ret = -EINTR;
1039 			break;
1040 		}
1041 
1042 		dax.sector = dax_iomap_sector(iomap, pos);
1043 		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
1044 		map_len = dax_map_atomic(iomap->bdev, &dax);
1045 		if (map_len < 0) {
1046 			ret = map_len;
1047 			break;
1048 		}
1049 
1050 		dax.addr += offset;
1051 		map_len -= offset;
1052 		if (map_len > end - pos)
1053 			map_len = end - pos;
1054 
1055 		if (iov_iter_rw(iter) == WRITE)
1056 			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
1057 		else
1058 			map_len = copy_to_iter(dax.addr, map_len, iter);
1059 		dax_unmap_atomic(iomap->bdev, &dax);
1060 		if (map_len <= 0) {
1061 			ret = map_len ? map_len : -EFAULT;
1062 			break;
1063 		}
1064 
1065 		pos += map_len;
1066 		length -= map_len;
1067 		done += map_len;
1068 	}
1069 
1070 	return done ? done : ret;
1071 }
1072 
1073 /**
1074  * dax_iomap_rw - Perform I/O to a DAX file
1075  * @iocb:	The control block for this I/O
1076  * @iter:	The addresses to do I/O from or to
1077  * @ops:	iomap ops passed from the file system
1078  *
1079  * This function performs read and write operations to directly mapped
1080  * persistent memory.  The caller needs to take care of read/write exclusion
1081  * and evicting any page cache pages in the region under I/O.
1082  */
1083 ssize_t
1084 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1085 		const struct iomap_ops *ops)
1086 {
1087 	struct address_space *mapping = iocb->ki_filp->f_mapping;
1088 	struct inode *inode = mapping->host;
1089 	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1090 	unsigned flags = 0;
1091 
1092 	if (iov_iter_rw(iter) == WRITE) {
1093 		lockdep_assert_held_exclusive(&inode->i_rwsem);
1094 		flags |= IOMAP_WRITE;
1095 	} else {
1096 		lockdep_assert_held(&inode->i_rwsem);
1097 	}
1098 
1099 	while (iov_iter_count(iter)) {
1100 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1101 				iter, dax_iomap_actor);
1102 		if (ret <= 0)
1103 			break;
1104 		pos += ret;
1105 		done += ret;
1106 	}
1107 
1108 	iocb->ki_pos += done;
1109 	return done ? done : ret;
1110 }
1111 EXPORT_SYMBOL_GPL(dax_iomap_rw);
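/*
 * Usage sketch: a filesystem's ->read_iter() for a DAX file typically
 * wraps this helper like so (hypothetical example; "example_iomap_ops"
 * is whatever iomap ops the fs provides):
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *					     struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 *
 * The locking shown is the read/write exclusion that the comment above
 * says is the caller's responsibility.
 */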
1112 
1113 static int dax_fault_return(int error)
1114 {
1115 	if (error == 0)
1116 		return VM_FAULT_NOPAGE;
1117 	if (error == -ENOMEM)
1118 		return VM_FAULT_OOM;
1119 	return VM_FAULT_SIGBUS;
1120 }
1121 
1122 static int dax_iomap_pte_fault(struct vm_fault *vmf,
1123 			       const struct iomap_ops *ops)
1124 {
1125 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1126 	struct inode *inode = mapping->host;
1127 	unsigned long vaddr = vmf->address;
1128 	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1129 	sector_t sector;
1130 	struct iomap iomap = { 0 };
1131 	unsigned flags = IOMAP_FAULT;
1132 	int error, major = 0;
1133 	int vmf_ret = 0;
1134 	void *entry;
1135 
1136 	/*
1137 	 * Check whether the offset isn't beyond the end of the file now. The
1138 	 * caller is supposed to hold locks serializing us with truncate /
1139 	 * punch hole, so this is a reliable test.
1140 	 */
1141 	if (pos >= i_size_read(inode))
1142 		return VM_FAULT_SIGBUS;
1143 
1144 	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1145 		flags |= IOMAP_WRITE;
1146 
1147 	/*
1148 	 * Note that we don't bother to use iomap_apply here: DAX requires
1149 	 * the file system block size to be equal to the page size, which means
1150 	 * that we never have to deal with more than a single extent here.
1151 	 */
1152 	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1153 	if (error)
1154 		return dax_fault_return(error);
1155 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1156 		vmf_ret = dax_fault_return(-EIO);	/* fs corruption? */
1157 		goto finish_iomap;
1158 	}
1159 
1160 	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1161 	if (IS_ERR(entry)) {
1162 		vmf_ret = dax_fault_return(PTR_ERR(entry));
1163 		goto finish_iomap;
1164 	}
1165 
1166 	sector = dax_iomap_sector(&iomap, pos);
1167 
1168 	if (vmf->cow_page) {
1169 		switch (iomap.type) {
1170 		case IOMAP_HOLE:
1171 		case IOMAP_UNWRITTEN:
1172 			clear_user_highpage(vmf->cow_page, vaddr);
1173 			break;
1174 		case IOMAP_MAPPED:
1175 			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
1176 					vmf->cow_page, vaddr);
1177 			break;
1178 		default:
1179 			WARN_ON_ONCE(1);
1180 			error = -EIO;
1181 			break;
1182 		}
1183 
1184 		if (error)
1185 			goto error_unlock_entry;
1186 
1187 		__SetPageUptodate(vmf->cow_page);
1188 		vmf_ret = finish_fault(vmf);
1189 		if (!vmf_ret)
1190 			vmf_ret = VM_FAULT_DONE_COW;
1191 		goto unlock_entry;
1192 	}
1193 
1194 	switch (iomap.type) {
1195 	case IOMAP_MAPPED:
1196 		if (iomap.flags & IOMAP_F_NEW) {
1197 			count_vm_event(PGMAJFAULT);
1198 			mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
1199 			major = VM_FAULT_MAJOR;
1200 		}
1201 		error = dax_insert_mapping(mapping, iomap.bdev, sector,
1202 				PAGE_SIZE, &entry, vmf->vma, vmf);
1203 		/* -EBUSY is fine, somebody else faulted on the same PTE */
1204 		if (error == -EBUSY)
1205 			error = 0;
1206 		break;
1207 	case IOMAP_UNWRITTEN:
1208 	case IOMAP_HOLE:
1209 		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1210 			vmf_ret = dax_load_hole(mapping, &entry, vmf);
1211 			goto unlock_entry;
1212 		}
1213 		/*FALLTHRU*/
1214 	default:
1215 		WARN_ON_ONCE(1);
1216 		error = -EIO;
1217 		break;
1218 	}
1219 
1220  error_unlock_entry:
1221 	vmf_ret = dax_fault_return(error) | major;
1222  unlock_entry:
1223 	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1224  finish_iomap:
1225 	if (ops->iomap_end) {
1226 		int copied = PAGE_SIZE;
1227 
1228 		if (vmf_ret & VM_FAULT_ERROR)
1229 			copied = 0;
1230 		/*
1231 		 * The fault is done by now and there's no way back (another
1232 		 * thread may already be happily using the PTE we have installed).
1233 		 * Just ignore error from ->iomap_end since we cannot do much
1234 		 * with it.
1235 		 */
1236 		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1237 	}
1238 	return vmf_ret;
1239 }
1240 
1241 #ifdef CONFIG_FS_DAX_PMD
1242 /*
1243  * The 'colour' (i.e. low bits) within a PMD of a page offset.  This comes up
1244  * more often than one might expect in the below functions.
1245  */
1246 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
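/*
 * With 4k pages and 2MiB PMDs (the common x86-64 case) PG_PMD_COLOUR is
 * 511, so "pfn & PG_PMD_COLOUR" below checks 2MiB alignment of the
 * physical page, and "pgoff | PG_PMD_COLOUR" gives the last page offset
 * of the PMD-sized range starting at an aligned pgoff.
 */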
1247 
1248 static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
1249 		loff_t pos, void **entryp)
1250 {
1251 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1252 	struct block_device *bdev = iomap->bdev;
1253 	struct inode *inode = mapping->host;
1254 	struct blk_dax_ctl dax = {
1255 		.sector = dax_iomap_sector(iomap, pos),
1256 		.size = PMD_SIZE,
1257 	};
1258 	long length = dax_map_atomic(bdev, &dax);
1259 	void *ret = NULL;
1260 
1261 	if (length < 0) /* dax_map_atomic() failed */
1262 		goto fallback;
1263 	if (length < PMD_SIZE)
1264 		goto unmap_fallback;
1265 	if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
1266 		goto unmap_fallback;
1267 	if (!pfn_t_devmap(dax.pfn))
1268 		goto unmap_fallback;
1269 
1270 	dax_unmap_atomic(bdev, &dax);
1271 
1272 	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
1273 			RADIX_DAX_PMD);
1274 	if (IS_ERR(ret))
1275 		goto fallback;
1276 	*entryp = ret;
1277 
1278 	trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
1279 	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1280 			dax.pfn, vmf->flags & FAULT_FLAG_WRITE);
1281 
1282  unmap_fallback:
1283 	dax_unmap_atomic(bdev, &dax);
1284 fallback:
1285 	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
1286 			dax.pfn, ret);
1287 	return VM_FAULT_FALLBACK;
1288 }
1289 
1290 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1291 		void **entryp)
1292 {
1293 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1294 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1295 	struct inode *inode = mapping->host;
1296 	struct page *zero_page;
1297 	void *ret = NULL;
1298 	spinlock_t *ptl;
1299 	pmd_t pmd_entry;
1300 
1301 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1302 
1303 	if (unlikely(!zero_page))
1304 		goto fallback;
1305 
1306 	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
1307 			RADIX_DAX_PMD | RADIX_DAX_HZP);
1308 	if (IS_ERR(ret))
1309 		goto fallback;
1310 	*entryp = ret;
1311 
1312 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1313 	if (!pmd_none(*(vmf->pmd))) {
1314 		spin_unlock(ptl);
1315 		goto fallback;
1316 	}
1317 
1318 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1319 	pmd_entry = pmd_mkhuge(pmd_entry);
1320 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1321 	spin_unlock(ptl);
1322 	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1323 	return VM_FAULT_NOPAGE;
1324 
1325 fallback:
1326 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1327 	return VM_FAULT_FALLBACK;
1328 }
1329 
1330 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1331 			       const struct iomap_ops *ops)
1332 {
1333 	struct vm_area_struct *vma = vmf->vma;
1334 	struct address_space *mapping = vma->vm_file->f_mapping;
1335 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1336 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1337 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1338 	struct inode *inode = mapping->host;
1339 	int result = VM_FAULT_FALLBACK;
1340 	struct iomap iomap = { 0 };
1341 	pgoff_t max_pgoff, pgoff;
1342 	void *entry;
1343 	loff_t pos;
1344 	int error;
1345 
1346 	/*
1347 	 * Check whether the offset isn't beyond the end of the file now. The
1348 	 * caller is supposed to hold locks serializing us with truncate /
1349 	 * punch hole, so this is a reliable test.
1350 	 */
1351 	pgoff = linear_page_index(vma, pmd_addr);
1352 	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
1353 
1354 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1355 
1356 	/* Fall back to PTEs if we're going to COW */
1357 	if (write && !(vma->vm_flags & VM_SHARED))
1358 		goto fallback;
1359 
1360 	/* If the PMD would extend outside the VMA */
1361 	if (pmd_addr < vma->vm_start)
1362 		goto fallback;
1363 	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1364 		goto fallback;
1365 
1366 	if (pgoff > max_pgoff) {
1367 		result = VM_FAULT_SIGBUS;
1368 		goto out;
1369 	}
1370 
1371 	/* If the PMD would extend beyond the file size */
1372 	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
1373 		goto fallback;
1374 
1375 	/*
1376 	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1377 	 * setting up a mapping, so really we're using iomap_begin() as a way
1378 	 * to look up our filesystem block.
1379 	 */
1380 	pos = (loff_t)pgoff << PAGE_SHIFT;
1381 	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1382 	if (error)
1383 		goto fallback;
1384 
1385 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
1386 		goto finish_iomap;
1387 
1388 	/*
1389 	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
1390 	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
1391 	 * the tree, for instance), it will return -EEXIST and we just fall
1392 	 * back to 4k entries.
1393 	 */
1394 	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1395 	if (IS_ERR(entry))
1396 		goto finish_iomap;
1397 
1398 	switch (iomap.type) {
1399 	case IOMAP_MAPPED:
1400 		result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
1401 		break;
1402 	case IOMAP_UNWRITTEN:
1403 	case IOMAP_HOLE:
1404 		if (WARN_ON_ONCE(write))
1405 			goto unlock_entry;
1406 		result = dax_pmd_load_hole(vmf, &iomap, &entry);
1407 		break;
1408 	default:
1409 		WARN_ON_ONCE(1);
1410 		break;
1411 	}
1412 
1413  unlock_entry:
1414 	put_locked_mapping_entry(mapping, pgoff, entry);
1415  finish_iomap:
1416 	if (ops->iomap_end) {
1417 		int copied = PMD_SIZE;
1418 
1419 		if (result == VM_FAULT_FALLBACK)
1420 			copied = 0;
1421 		/*
1422 		 * The fault is done by now and there's no way back (another
1423 		 * thread may already be happily using the PMD we have installed).
1424 		 * Just ignore error from ->iomap_end since we cannot do much
1425 		 * with it.
1426 		 */
1427 		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1428 				&iomap);
1429 	}
1430  fallback:
1431 	if (result == VM_FAULT_FALLBACK) {
1432 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1433 		count_vm_event(THP_FAULT_FALLBACK);
1434 	}
1435 out:
1436 	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1437 	return result;
1438 }
1439 #else
1440 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1441 			       const struct iomap_ops *ops)
1442 {
1443 	return VM_FAULT_FALLBACK;
1444 }
1445 #endif /* CONFIG_FS_DAX_PMD */
1446 
1447 /**
1448  * dax_iomap_fault - handle a page fault on a DAX file
1449  * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in (PTE or PMD)
1450  * @ops: iomap ops passed from the file system
1451  *
1452  * When a page fault occurs, filesystems may call this helper in
1453  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1454  * has done all the necessary locking for the page fault to proceed
1455  * successfully.
1456  */
1457 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1458 		    const struct iomap_ops *ops)
1459 {
1460 	switch (pe_size) {
1461 	case PE_SIZE_PTE:
1462 		return dax_iomap_pte_fault(vmf, ops);
1463 	case PE_SIZE_PMD:
1464 		return dax_iomap_pmd_fault(vmf, ops);
1465 	default:
1466 		return VM_FAULT_FALLBACK;
1467 	}
1468 }
1469 EXPORT_SYMBOL_GPL(dax_iomap_fault);
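/*
 * Usage sketch: filesystems call this from their ->fault and ->huge_fault
 * handlers after taking whatever lock serializes page faults against
 * truncate, e.g. (hypothetical example; "example_mmap_sem" and
 * "example_iomap_ops" stand for the fs's own lock and iomap ops):
 *
 *	static int example_dax_huge_fault(struct vm_fault *vmf,
 *					  enum page_entry_size pe_size)
 *	{
 *		int ret;
 *
 *		down_read(&example_mmap_sem);
 *		ret = dax_iomap_fault(vmf, pe_size, &example_iomap_ops);
 *		up_read(&example_mmap_sem);
 *		return ret;
 *	}
 */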
1470