/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

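/*
 * Summary (see the RADIX_DAX_* definitions in dax.h): DAX keeps exceptional
 * entries in the mapping's radix tree.  Each entry roughly encodes the block
 * sector it maps plus flag bits - RADIX_DAX_PMD for 2MiB entries,
 * RADIX_DAX_HZP for huge zero pages, RADIX_DAX_EMPTY for entries with no
 * backing yet - and the low RADIX_DAX_ENTRY_LOCK bit, which the helpers
 * below use as a per-entry lock.
 */
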
/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
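	/*
	 * For example, with 4K pages and 2MiB PMD entries (PMD_SHIFT -
	 * PAGE_SHIFT == 9), radix tree indices 0x200..0x3ff all align down
	 * to entry_start 0x200 and therefore hash to the same wait queue.
	 */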
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Lookup entry in radix tree, wait for it to become unlocked if it is
 * an exceptional entry and return it. The caller must call
 * put_unlocked_mapping_entry() when it decided not to lock the entry or
 * put_locked_mapping_entry() when it locked the entry and now wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Find radix tree entry at given index. If it points to a page, return with
 * the page locked. If it points to the exceptional entry, return with the
 * radix tree entry locked. If the radix tree doesn't contain given index,
 * create empty exceptional entry for the index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
			    dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
			    dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

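	/*
	 * Two cases fall through to the slow path below: there is no entry
	 * at 'index' yet, or an existing huge zero page / empty PMD entry
	 * must be downgraded to PTE size.  In the downgrade case the old
	 * entry is locked before tree_lock is dropped, any huge zero page
	 * mapping is unmapped, and the PMD entry is replaced with a fresh
	 * PTE-sized empty entry.
	 */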
	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
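	/*
	 * wake_all == false wakes a single exclusive waiter (nr_exclusive of
	 * 1 below); wake_all == true wakes every waiter and is used when the
	 * entry is being removed from the tree altogether.
	 */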
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if easily possible. This handles DAX
 * entries for invalidate_inode_pages() so we evict the entry only if we can
 * do so without blocking.
 */
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = 0;
	void *entry, **slot;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry) ||
	    slot_locked(mapping, slot))
		goto out;
	if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	    radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	spin_unlock_irq(&mapping->tree_lock);
	if (ret)
		dax_wake_mapping_entry_waiter(mapping, index, entry, true);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

/*
 * The user has performed a load from a hole in the file. Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files. We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void **entry,
			 struct vm_fault *vmf)
{
	struct page *page;
	int ret;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(*entry)) {
		page = *entry;
		goto out;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
out:
	vmf->page = page;
	ret = finish_fault(vmf);
	vmf->page = NULL;
	*entry = page;
	if (!ret) {
		/* Grab reference for PTE that is now referencing the page */
		get_page(page);
		return VM_FAULT_NOPAGE;
	}
	return ret;
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

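	/*
	 * Three cases follow: a hole page is being replaced by a block
	 * mapping (unmap it and preload the radix tree), a huge zero page is
	 * being replaced by a PMD block mapping (unmap the whole 2MiB range),
	 * or an existing exceptional entry is reused in place.
	 */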
	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need the hole page anymore, it has been replaced
		 * with a locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;
	bool changed;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);
		changed = false;
		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			changed = true;
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			changed = true;
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		if (changed)
			mmu_notifier_invalidate_page(vma->vm_mm, address);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct block_device *bdev,
		struct dax_device *dax_dev, struct address_space *mapping,
		pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot, *kaddr;
	long ret = 0, id;
	sector_t sector;
	pgoff_t pgoff;
	size_t size;
	pfn_t pfn;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || !radix_tree_exceptional_entry(entry2))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
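	/*
	 * The flush size below is PAGE_SIZE << dax_radix_order(entry), i.e.
	 * 4K for a PTE entry and (on x86_64) 2MiB for a PMD entry.
	 */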
	sector = dax_radix_sector(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	id = dax_read_lock();
	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (ret)
		goto dax_unlock;

	/*
	 * dax_direct_access() may sleep, so cannot hold tree_lock over
	 * its invocation.
	 */
	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
	if (ret < 0)
		goto dax_unlock;

	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
		ret = -EIO;
		goto dax_unlock;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
	wb_cache_pmem(kaddr, size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
dax_unlock:
	dax_read_unlock(id);
	put_locked_mapping_entry(mapping, index, entry);
	return ret;

put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
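 *
 * Called by filesystems from their writeback path (e.g. ->writepages()) for
 * DAX mappings.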
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, dax_dev, mapping,
					indices[i], pvec.pages[i]);
			if (ret < 0) {
				put_dax(dax_dev);
				return ret;
			}
		}
	}
	put_dax(dax_dev);
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, void **entryp,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	void *entry = *entryp;
	void *ret, *kaddr;
	pgoff_t pgoff;
	int id, rc;
	pfn_t pfn;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, pfn);
}

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry)) {
		if (entry)
			put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	/*
	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
	 * the fault in either case.
	 */
	finish_mkwrite_fault(vmf);
	put_locked_mapping_entry(mapping, index, entry);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

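/*
 * Two paths below: ranges aligned to the device's logical block size are
 * zeroed through the block layer with blkdev_issue_zeroout(), while
 * sub-sector ranges are zeroed in place via dax_direct_access() and
 * clear_pmem() so neighbouring data in the same block is preserved.
 */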
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		clear_pmem(kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(kaddr, map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
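 *
 * A typical caller is a filesystem's ->read_iter()/->write_iter() method,
 * e.g. dax_iomap_rw(iocb, iter, &ext4_iomap_ops) under the appropriate
 * inode locks.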
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

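/*
 * Handle a PTE-sized fault: ask the filesystem where the faulting page lives
 * via ops->iomap_begin(), then either install the pfn mapping directly
 * (IOMAP_MAPPED), copy into a COW page, or fall back to dax_load_hole() for
 * read faults over holes.
 */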
done : ret; 1127a254e568SChristoph Hellwig } 112811c59c92SRoss Zwisler EXPORT_SYMBOL_GPL(dax_iomap_rw); 1129a7d73fe6SChristoph Hellwig 11309f141d6eSJan Kara static int dax_fault_return(int error) 11319f141d6eSJan Kara { 11329f141d6eSJan Kara if (error == 0) 11339f141d6eSJan Kara return VM_FAULT_NOPAGE; 11349f141d6eSJan Kara if (error == -ENOMEM) 11359f141d6eSJan Kara return VM_FAULT_OOM; 11369f141d6eSJan Kara return VM_FAULT_SIGBUS; 11379f141d6eSJan Kara } 11389f141d6eSJan Kara 1139a2d58167SDave Jiang static int dax_iomap_pte_fault(struct vm_fault *vmf, 1140a2d58167SDave Jiang const struct iomap_ops *ops) 1141a7d73fe6SChristoph Hellwig { 114211bac800SDave Jiang struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1143a7d73fe6SChristoph Hellwig struct inode *inode = mapping->host; 11441a29d85eSJan Kara unsigned long vaddr = vmf->address; 1145a7d73fe6SChristoph Hellwig loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; 1146a7d73fe6SChristoph Hellwig sector_t sector; 1147a7d73fe6SChristoph Hellwig struct iomap iomap = { 0 }; 11489484ab1bSJan Kara unsigned flags = IOMAP_FAULT; 1149a7d73fe6SChristoph Hellwig int error, major = 0; 1150b1aa812bSJan Kara int vmf_ret = 0; 1151a7d73fe6SChristoph Hellwig void *entry; 1152a7d73fe6SChristoph Hellwig 1153a9c42b33SRoss Zwisler trace_dax_pte_fault(inode, vmf, vmf_ret); 1154a7d73fe6SChristoph Hellwig /* 1155a7d73fe6SChristoph Hellwig * Check whether offset isn't beyond end of file now. Caller is supposed 1156a7d73fe6SChristoph Hellwig * to hold locks serializing us with truncate / punch hole so this is 1157a7d73fe6SChristoph Hellwig * a reliable test. 1158a7d73fe6SChristoph Hellwig */ 1159a9c42b33SRoss Zwisler if (pos >= i_size_read(inode)) { 1160a9c42b33SRoss Zwisler vmf_ret = VM_FAULT_SIGBUS; 1161a9c42b33SRoss Zwisler goto out; 1162a9c42b33SRoss Zwisler } 1163a7d73fe6SChristoph Hellwig 1164a7d73fe6SChristoph Hellwig if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) 1165a7d73fe6SChristoph Hellwig flags |= IOMAP_WRITE; 1166a7d73fe6SChristoph Hellwig 1167a7d73fe6SChristoph Hellwig /* 1168a7d73fe6SChristoph Hellwig * Note that we don't bother to use iomap_apply here: DAX required 1169a7d73fe6SChristoph Hellwig * the file system block size to be equal the page size, which means 1170a7d73fe6SChristoph Hellwig * that we never have to deal with more than a single extent here. 1171a7d73fe6SChristoph Hellwig */ 1172a7d73fe6SChristoph Hellwig error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap); 1173a9c42b33SRoss Zwisler if (error) { 1174a9c42b33SRoss Zwisler vmf_ret = dax_fault_return(error); 1175a9c42b33SRoss Zwisler goto out; 1176a9c42b33SRoss Zwisler } 1177a7d73fe6SChristoph Hellwig if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { 11789f141d6eSJan Kara vmf_ret = dax_fault_return(-EIO); /* fs corruption? 
*/ 11799f141d6eSJan Kara goto finish_iomap; 11809f141d6eSJan Kara } 11819f141d6eSJan Kara 11829f141d6eSJan Kara entry = grab_mapping_entry(mapping, vmf->pgoff, 0); 11839f141d6eSJan Kara if (IS_ERR(entry)) { 11849f141d6eSJan Kara vmf_ret = dax_fault_return(PTR_ERR(entry)); 11851550290bSRoss Zwisler goto finish_iomap; 1186a7d73fe6SChristoph Hellwig } 1187a7d73fe6SChristoph Hellwig 1188333ccc97SRoss Zwisler sector = dax_iomap_sector(&iomap, pos); 1189a7d73fe6SChristoph Hellwig 1190a7d73fe6SChristoph Hellwig if (vmf->cow_page) { 1191a7d73fe6SChristoph Hellwig switch (iomap.type) { 1192a7d73fe6SChristoph Hellwig case IOMAP_HOLE: 1193a7d73fe6SChristoph Hellwig case IOMAP_UNWRITTEN: 1194a7d73fe6SChristoph Hellwig clear_user_highpage(vmf->cow_page, vaddr); 1195a7d73fe6SChristoph Hellwig break; 1196a7d73fe6SChristoph Hellwig case IOMAP_MAPPED: 1197cccbce67SDan Williams error = copy_user_dax(iomap.bdev, iomap.dax_dev, 1198cccbce67SDan Williams sector, PAGE_SIZE, vmf->cow_page, vaddr); 1199a7d73fe6SChristoph Hellwig break; 1200a7d73fe6SChristoph Hellwig default: 1201a7d73fe6SChristoph Hellwig WARN_ON_ONCE(1); 1202a7d73fe6SChristoph Hellwig error = -EIO; 1203a7d73fe6SChristoph Hellwig break; 1204a7d73fe6SChristoph Hellwig } 1205a7d73fe6SChristoph Hellwig 1206a7d73fe6SChristoph Hellwig if (error) 12079f141d6eSJan Kara goto error_unlock_entry; 1208b1aa812bSJan Kara 1209b1aa812bSJan Kara __SetPageUptodate(vmf->cow_page); 1210b1aa812bSJan Kara vmf_ret = finish_fault(vmf); 1211b1aa812bSJan Kara if (!vmf_ret) 1212b1aa812bSJan Kara vmf_ret = VM_FAULT_DONE_COW; 12139f141d6eSJan Kara goto unlock_entry; 1214a7d73fe6SChristoph Hellwig } 1215a7d73fe6SChristoph Hellwig 1216a7d73fe6SChristoph Hellwig switch (iomap.type) { 1217a7d73fe6SChristoph Hellwig case IOMAP_MAPPED: 1218a7d73fe6SChristoph Hellwig if (iomap.flags & IOMAP_F_NEW) { 1219a7d73fe6SChristoph Hellwig count_vm_event(PGMAJFAULT); 122011bac800SDave Jiang mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT); 1221a7d73fe6SChristoph Hellwig major = VM_FAULT_MAJOR; 1222a7d73fe6SChristoph Hellwig } 1223cccbce67SDan Williams error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev, 1224cccbce67SDan Williams sector, PAGE_SIZE, &entry, vmf->vma, vmf); 12259f141d6eSJan Kara /* -EBUSY is fine, somebody else faulted on the same PTE */ 12269f141d6eSJan Kara if (error == -EBUSY) 12279f141d6eSJan Kara error = 0; 1228a7d73fe6SChristoph Hellwig break; 1229a7d73fe6SChristoph Hellwig case IOMAP_UNWRITTEN: 1230a7d73fe6SChristoph Hellwig case IOMAP_HOLE: 12311550290bSRoss Zwisler if (!(vmf->flags & FAULT_FLAG_WRITE)) { 1232f449b936SJan Kara vmf_ret = dax_load_hole(mapping, &entry, vmf); 12339f141d6eSJan Kara goto unlock_entry; 12341550290bSRoss Zwisler } 1235a7d73fe6SChristoph Hellwig /*FALLTHRU*/ 1236a7d73fe6SChristoph Hellwig default: 1237a7d73fe6SChristoph Hellwig WARN_ON_ONCE(1); 1238a7d73fe6SChristoph Hellwig error = -EIO; 1239a7d73fe6SChristoph Hellwig break; 1240a7d73fe6SChristoph Hellwig } 1241a7d73fe6SChristoph Hellwig 12429f141d6eSJan Kara error_unlock_entry: 12439f141d6eSJan Kara vmf_ret = dax_fault_return(error) | major; 1244a7d73fe6SChristoph Hellwig unlock_entry: 1245a7d73fe6SChristoph Hellwig put_locked_mapping_entry(mapping, vmf->pgoff, entry); 12469f141d6eSJan Kara finish_iomap: 12479f141d6eSJan Kara if (ops->iomap_end) { 12489f141d6eSJan Kara int copied = PAGE_SIZE; 12499f141d6eSJan Kara 12509f141d6eSJan Kara if (vmf_ret & VM_FAULT_ERROR) 12519f141d6eSJan Kara copied = 0; 12529f141d6eSJan Kara /* 12539f141d6eSJan Kara * The fault is done by now and 
there's no way back (other 12549f141d6eSJan Kara * thread may be already happily using PTE we have installed). 12559f141d6eSJan Kara * Just ignore error from ->iomap_end since we cannot do much 12569f141d6eSJan Kara * with it. 12579f141d6eSJan Kara */ 12589f141d6eSJan Kara ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap); 12591550290bSRoss Zwisler } 1260a9c42b33SRoss Zwisler out: 1261a9c42b33SRoss Zwisler trace_dax_pte_fault_done(inode, vmf, vmf_ret); 12629f141d6eSJan Kara return vmf_ret; 1263a7d73fe6SChristoph Hellwig } 1264642261acSRoss Zwisler 1265642261acSRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 1266642261acSRoss Zwisler /* 1267642261acSRoss Zwisler * The 'colour' (ie low bits) within a PMD of a page offset. This comes up 1268642261acSRoss Zwisler * more often than one might expect in the below functions. 1269642261acSRoss Zwisler */ 1270642261acSRoss Zwisler #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) 1271642261acSRoss Zwisler 1272f4200391SDave Jiang static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap, 1273f4200391SDave Jiang loff_t pos, void **entryp) 1274642261acSRoss Zwisler { 1275f4200391SDave Jiang struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1276cccbce67SDan Williams const sector_t sector = dax_iomap_sector(iomap, pos); 1277cccbce67SDan Williams struct dax_device *dax_dev = iomap->dax_dev; 1278642261acSRoss Zwisler struct block_device *bdev = iomap->bdev; 127927a7ffacSRoss Zwisler struct inode *inode = mapping->host; 1280cccbce67SDan Williams const size_t size = PMD_SIZE; 1281cccbce67SDan Williams void *ret = NULL, *kaddr; 1282cccbce67SDan Williams long length = 0; 1283cccbce67SDan Williams pgoff_t pgoff; 1284cccbce67SDan Williams pfn_t pfn; 1285cccbce67SDan Williams int id; 1286642261acSRoss Zwisler 1287cccbce67SDan Williams if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0) 128827a7ffacSRoss Zwisler goto fallback; 1289642261acSRoss Zwisler 1290cccbce67SDan Williams id = dax_read_lock(); 1291cccbce67SDan Williams length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn); 1292cccbce67SDan Williams if (length < 0) 1293cccbce67SDan Williams goto unlock_fallback; 1294cccbce67SDan Williams length = PFN_PHYS(length); 1295642261acSRoss Zwisler 1296cccbce67SDan Williams if (length < size) 1297cccbce67SDan Williams goto unlock_fallback; 1298cccbce67SDan Williams if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR) 1299cccbce67SDan Williams goto unlock_fallback; 1300cccbce67SDan Williams if (!pfn_t_devmap(pfn)) 1301cccbce67SDan Williams goto unlock_fallback; 1302cccbce67SDan Williams dax_read_unlock(id); 1303cccbce67SDan Williams 1304cccbce67SDan Williams ret = dax_insert_mapping_entry(mapping, vmf, *entryp, sector, 1305642261acSRoss Zwisler RADIX_DAX_PMD); 1306642261acSRoss Zwisler if (IS_ERR(ret)) 130727a7ffacSRoss Zwisler goto fallback; 1308642261acSRoss Zwisler *entryp = ret; 1309642261acSRoss Zwisler 1310cccbce67SDan Williams trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret); 1311f4200391SDave Jiang return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, 1312cccbce67SDan Williams pfn, vmf->flags & FAULT_FLAG_WRITE); 1313642261acSRoss Zwisler 1314cccbce67SDan Williams unlock_fallback: 1315cccbce67SDan Williams dax_read_unlock(id); 131627a7ffacSRoss Zwisler fallback: 1317cccbce67SDan Williams trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret); 1318642261acSRoss Zwisler return VM_FAULT_FALLBACK; 1319642261acSRoss Zwisler } 1320642261acSRoss Zwisler 1321f4200391SDave Jiang static int 
dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap, 1322f4200391SDave Jiang void **entryp) 1323642261acSRoss Zwisler { 1324f4200391SDave Jiang struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1325f4200391SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1326653b2ea3SRoss Zwisler struct inode *inode = mapping->host; 1327642261acSRoss Zwisler struct page *zero_page; 1328653b2ea3SRoss Zwisler void *ret = NULL; 1329642261acSRoss Zwisler spinlock_t *ptl; 1330642261acSRoss Zwisler pmd_t pmd_entry; 1331642261acSRoss Zwisler 1332f4200391SDave Jiang zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); 1333642261acSRoss Zwisler 1334642261acSRoss Zwisler if (unlikely(!zero_page)) 1335653b2ea3SRoss Zwisler goto fallback; 1336642261acSRoss Zwisler 1337642261acSRoss Zwisler ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0, 1338642261acSRoss Zwisler RADIX_DAX_PMD | RADIX_DAX_HZP); 1339642261acSRoss Zwisler if (IS_ERR(ret)) 1340653b2ea3SRoss Zwisler goto fallback; 1341642261acSRoss Zwisler *entryp = ret; 1342642261acSRoss Zwisler 1343f4200391SDave Jiang ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1344f4200391SDave Jiang if (!pmd_none(*(vmf->pmd))) { 1345642261acSRoss Zwisler spin_unlock(ptl); 1346653b2ea3SRoss Zwisler goto fallback; 1347642261acSRoss Zwisler } 1348642261acSRoss Zwisler 1349f4200391SDave Jiang pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); 1350642261acSRoss Zwisler pmd_entry = pmd_mkhuge(pmd_entry); 1351f4200391SDave Jiang set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); 1352642261acSRoss Zwisler spin_unlock(ptl); 1353f4200391SDave Jiang trace_dax_pmd_load_hole(inode, vmf, zero_page, ret); 1354642261acSRoss Zwisler return VM_FAULT_NOPAGE; 1355653b2ea3SRoss Zwisler 1356653b2ea3SRoss Zwisler fallback: 1357f4200391SDave Jiang trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret); 1358642261acSRoss Zwisler return VM_FAULT_FALLBACK; 1359642261acSRoss Zwisler } 1360642261acSRoss Zwisler 1361a2d58167SDave Jiang static int dax_iomap_pmd_fault(struct vm_fault *vmf, 1362a2d58167SDave Jiang const struct iomap_ops *ops) 1363642261acSRoss Zwisler { 1364f4200391SDave Jiang struct vm_area_struct *vma = vmf->vma; 1365642261acSRoss Zwisler struct address_space *mapping = vma->vm_file->f_mapping; 1366d8a849e1SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1367d8a849e1SDave Jiang bool write = vmf->flags & FAULT_FLAG_WRITE; 13689484ab1bSJan Kara unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT; 1369642261acSRoss Zwisler struct inode *inode = mapping->host; 1370642261acSRoss Zwisler int result = VM_FAULT_FALLBACK; 1371642261acSRoss Zwisler struct iomap iomap = { 0 }; 1372642261acSRoss Zwisler pgoff_t max_pgoff, pgoff; 1373642261acSRoss Zwisler void *entry; 1374642261acSRoss Zwisler loff_t pos; 1375642261acSRoss Zwisler int error; 1376642261acSRoss Zwisler 1377282a8e03SRoss Zwisler /* 1378282a8e03SRoss Zwisler * Check whether offset isn't beyond end of file now. Caller is 1379282a8e03SRoss Zwisler * supposed to hold locks serializing us with truncate / punch hole so 1380282a8e03SRoss Zwisler * this is a reliable test. 
1381282a8e03SRoss Zwisler */ 1382282a8e03SRoss Zwisler pgoff = linear_page_index(vma, pmd_addr); 1383282a8e03SRoss Zwisler max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT; 1384282a8e03SRoss Zwisler 1385f4200391SDave Jiang trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); 1386282a8e03SRoss Zwisler 1387642261acSRoss Zwisler /* Fall back to PTEs if we're going to COW */ 1388642261acSRoss Zwisler if (write && !(vma->vm_flags & VM_SHARED)) 1389642261acSRoss Zwisler goto fallback; 1390642261acSRoss Zwisler 1391642261acSRoss Zwisler /* If the PMD would extend outside the VMA */ 1392642261acSRoss Zwisler if (pmd_addr < vma->vm_start) 1393642261acSRoss Zwisler goto fallback; 1394642261acSRoss Zwisler if ((pmd_addr + PMD_SIZE) > vma->vm_end) 1395642261acSRoss Zwisler goto fallback; 1396642261acSRoss Zwisler 1397282a8e03SRoss Zwisler if (pgoff > max_pgoff) { 1398282a8e03SRoss Zwisler result = VM_FAULT_SIGBUS; 1399282a8e03SRoss Zwisler goto out; 1400282a8e03SRoss Zwisler } 1401642261acSRoss Zwisler 1402642261acSRoss Zwisler /* If the PMD would extend beyond the file size */ 1403642261acSRoss Zwisler if ((pgoff | PG_PMD_COLOUR) > max_pgoff) 1404642261acSRoss Zwisler goto fallback; 1405642261acSRoss Zwisler 1406642261acSRoss Zwisler /* 1407642261acSRoss Zwisler * Note that we don't use iomap_apply here. We aren't doing I/O, only 1408642261acSRoss Zwisler * setting up a mapping, so really we're using iomap_begin() as a way 1409642261acSRoss Zwisler * to look up our filesystem block. 1410642261acSRoss Zwisler */ 1411642261acSRoss Zwisler pos = (loff_t)pgoff << PAGE_SHIFT; 1412642261acSRoss Zwisler error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap); 1413642261acSRoss Zwisler if (error) 14149f141d6eSJan Kara goto fallback; 14159f141d6eSJan Kara 1416642261acSRoss Zwisler if (iomap.offset + iomap.length < pos + PMD_SIZE) 1417642261acSRoss Zwisler goto finish_iomap; 1418642261acSRoss Zwisler 14199f141d6eSJan Kara /* 14209f141d6eSJan Kara * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX 14219f141d6eSJan Kara * PMD or a HZP entry. If it can't (because a 4k page is already in 14229f141d6eSJan Kara * the tree, for instance), it will return -EEXIST and we just fall 14239f141d6eSJan Kara * back to 4k entries. 
14249f141d6eSJan Kara */ 14259f141d6eSJan Kara entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); 14269f141d6eSJan Kara if (IS_ERR(entry)) 14279f141d6eSJan Kara goto finish_iomap; 14289f141d6eSJan Kara 1429642261acSRoss Zwisler switch (iomap.type) { 1430642261acSRoss Zwisler case IOMAP_MAPPED: 1431f4200391SDave Jiang result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry); 1432642261acSRoss Zwisler break; 1433642261acSRoss Zwisler case IOMAP_UNWRITTEN: 1434642261acSRoss Zwisler case IOMAP_HOLE: 1435642261acSRoss Zwisler if (WARN_ON_ONCE(write)) 14369f141d6eSJan Kara goto unlock_entry; 1437f4200391SDave Jiang result = dax_pmd_load_hole(vmf, &iomap, &entry); 1438642261acSRoss Zwisler break; 1439642261acSRoss Zwisler default: 1440642261acSRoss Zwisler WARN_ON_ONCE(1); 1441642261acSRoss Zwisler break; 1442642261acSRoss Zwisler } 1443642261acSRoss Zwisler 1444642261acSRoss Zwisler unlock_entry: 1445642261acSRoss Zwisler put_locked_mapping_entry(mapping, pgoff, entry); 14469f141d6eSJan Kara finish_iomap: 14479f141d6eSJan Kara if (ops->iomap_end) { 14489f141d6eSJan Kara int copied = PMD_SIZE; 14499f141d6eSJan Kara 14509f141d6eSJan Kara if (result == VM_FAULT_FALLBACK) 14519f141d6eSJan Kara copied = 0; 14529f141d6eSJan Kara /* 14539f141d6eSJan Kara * The fault is done by now and there's no way back (other 14549f141d6eSJan Kara * thread may be already happily using PMD we have installed). 14559f141d6eSJan Kara * Just ignore error from ->iomap_end since we cannot do much 14569f141d6eSJan Kara * with it. 14579f141d6eSJan Kara */ 14589f141d6eSJan Kara ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags, 14599f141d6eSJan Kara &iomap); 14609f141d6eSJan Kara } 1461642261acSRoss Zwisler fallback: 1462642261acSRoss Zwisler if (result == VM_FAULT_FALLBACK) { 1463d8a849e1SDave Jiang split_huge_pmd(vma, vmf->pmd, vmf->address); 1464642261acSRoss Zwisler count_vm_event(THP_FAULT_FALLBACK); 1465642261acSRoss Zwisler } 1466282a8e03SRoss Zwisler out: 1467f4200391SDave Jiang trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result); 1468642261acSRoss Zwisler return result; 1469642261acSRoss Zwisler } 1470a2d58167SDave Jiang #else 147101cddfe9SArnd Bergmann static int dax_iomap_pmd_fault(struct vm_fault *vmf, 147201cddfe9SArnd Bergmann const struct iomap_ops *ops) 1473a2d58167SDave Jiang { 1474a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1475a2d58167SDave Jiang } 1476642261acSRoss Zwisler #endif /* CONFIG_FS_DAX_PMD */ 1477a2d58167SDave Jiang 1478a2d58167SDave Jiang /** 1479a2d58167SDave Jiang * dax_iomap_fault - handle a page fault on a DAX file 1480a2d58167SDave Jiang * @vmf: The description of the fault 1481a2d58167SDave Jiang * @ops: iomap ops passed from the file system 1482a2d58167SDave Jiang * 1483a2d58167SDave Jiang * When a page fault occurs, filesystems may call this helper in 1484a2d58167SDave Jiang * their fault handler for DAX files. dax_iomap_fault() assumes the caller 1485a2d58167SDave Jiang * has done all the necessary locking for page fault to proceed 1486a2d58167SDave Jiang * successfully. 
1487a2d58167SDave Jiang */ 1488c791ace1SDave Jiang int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 1489c791ace1SDave Jiang const struct iomap_ops *ops) 1490a2d58167SDave Jiang { 1491c791ace1SDave Jiang switch (pe_size) { 1492c791ace1SDave Jiang case PE_SIZE_PTE: 1493a2d58167SDave Jiang return dax_iomap_pte_fault(vmf, ops); 1494c791ace1SDave Jiang case PE_SIZE_PMD: 1495a2d58167SDave Jiang return dax_iomap_pmd_fault(vmf, ops); 1496a2d58167SDave Jiang default: 1497a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1498a2d58167SDave Jiang } 1499a2d58167SDave Jiang } 1500a2d58167SDave Jiang EXPORT_SYMBOL_GPL(dax_iomap_fault); 1501
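
/*
 * Example (not part of fs/dax.c): a minimal sketch of how a filesystem read
 * path might drive dax_iomap_rw().  The "example_" names and example_iomap_ops
 * are hypothetical; real callers such as ext4 and XFS differ in detail, but
 * the locking shown matches the lockdep assertions in dax_iomap_rw() above:
 * i_rwsem held shared for reads, exclusive for writes.
 */
static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;	/* nothing to do, avoid a pointless atime update */

	/* dax_iomap_rw() expects i_rwsem to be held; shared is enough for reads */
	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}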
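
/*
 * Worked example of the PG_PMD_COLOUR checks above, assuming x86-64 geometry
 * (PAGE_SHIFT == 12, PMD_SIZE == 2M):
 *
 *	PG_PMD_COLOUR = (PMD_SIZE >> PAGE_SHIFT) - 1 = 512 - 1 = 511 (0x1ff)
 *
 * dax_pmd_insert_mapping() only installs a PMD when
 * "pfn_t_to_pfn(pfn) & PG_PMD_COLOUR" is zero, i.e. the pfn is a multiple of
 * 512 and the backing physical address is 2M-aligned.  Likewise the
 * "(pgoff | PG_PMD_COLOUR) > max_pgoff" test in dax_iomap_pmd_fault() asks
 * whether the last of the 512 pages covered by the PMD would land past EOF,
 * in which case we fall back to PTEs.
 */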
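
/*
 * Example (not part of fs/dax.c): a minimal sketch of how a filesystem might
 * wire dax_iomap_fault() into its fault handlers.  The "example_" names and
 * example_iomap_ops are hypothetical; real users (ext4, XFS) also take their
 * own mmap locks, and only reach these handlers for VMAs whose ->mmap set
 * VM_MIXEDMAP (plus VM_HUGEPAGE to see PMD-sized faults at all).
 */
static int example_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	int ret;

	if (write) {
		/* write faults may allocate blocks: block freezing and bump mtime */
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	ret = dax_iomap_fault(vmf, pe_size, &example_iomap_ops);

	if (write)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static int example_dax_fault(struct vm_fault *vmf)
{
	/* PTE-sized faults funnel through the same helper */
	return example_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct example_dax_vm_ops = {
	.fault		= example_dax_fault,
	.huge_fault	= example_dax_huge_fault,
	.page_mkwrite	= example_dax_fault,
};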