1d475c634SMatthew Wilcox /* 2d475c634SMatthew Wilcox * fs/dax.c - Direct Access filesystem code 3d475c634SMatthew Wilcox * Copyright (c) 2013-2014 Intel Corporation 4d475c634SMatthew Wilcox * Author: Matthew Wilcox <matthew.r.wilcox@intel.com> 5d475c634SMatthew Wilcox * Author: Ross Zwisler <ross.zwisler@linux.intel.com> 6d475c634SMatthew Wilcox * 7d475c634SMatthew Wilcox * This program is free software; you can redistribute it and/or modify it 8d475c634SMatthew Wilcox * under the terms and conditions of the GNU General Public License, 9d475c634SMatthew Wilcox * version 2, as published by the Free Software Foundation. 10d475c634SMatthew Wilcox * 11d475c634SMatthew Wilcox * This program is distributed in the hope it will be useful, but WITHOUT 12d475c634SMatthew Wilcox * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13d475c634SMatthew Wilcox * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14d475c634SMatthew Wilcox * more details. 15d475c634SMatthew Wilcox */ 16d475c634SMatthew Wilcox 17d475c634SMatthew Wilcox #include <linux/atomic.h> 18d475c634SMatthew Wilcox #include <linux/blkdev.h> 19d475c634SMatthew Wilcox #include <linux/buffer_head.h> 20d77e92e2SRoss Zwisler #include <linux/dax.h> 21d475c634SMatthew Wilcox #include <linux/fs.h> 22d475c634SMatthew Wilcox #include <linux/genhd.h> 23f7ca90b1SMatthew Wilcox #include <linux/highmem.h> 24f7ca90b1SMatthew Wilcox #include <linux/memcontrol.h> 25f7ca90b1SMatthew Wilcox #include <linux/mm.h> 26d475c634SMatthew Wilcox #include <linux/mutex.h> 279973c98eSRoss Zwisler #include <linux/pagevec.h> 28289c6aedSMatthew Wilcox #include <linux/sched.h> 29f361bf4aSIngo Molnar #include <linux/sched/signal.h> 30d475c634SMatthew Wilcox #include <linux/uio.h> 31f7ca90b1SMatthew Wilcox #include <linux/vmstat.h> 3234c0fd54SDan Williams #include <linux/pfn_t.h> 330e749e54SDan Williams #include <linux/sizes.h> 344b4bb46dSJan Kara #include <linux/mmu_notifier.h> 35a254e568SChristoph Hellwig #include <linux/iomap.h> 36a254e568SChristoph Hellwig #include "internal.h" 37d475c634SMatthew Wilcox 38282a8e03SRoss Zwisler #define CREATE_TRACE_POINTS 39282a8e03SRoss Zwisler #include <trace/events/fs_dax.h> 40282a8e03SRoss Zwisler 41ac401cc7SJan Kara /* We choose 4096 entries - same as per-zone page wait tables */ 42ac401cc7SJan Kara #define DAX_WAIT_TABLE_BITS 12 43ac401cc7SJan Kara #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS) 44ac401cc7SJan Kara 45917f3452SRoss Zwisler /* The 'colour' (ie low bits) within a PMD of a page offset. */ 46917f3452SRoss Zwisler #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) 47917f3452SRoss Zwisler 48ce95ab0fSRoss Zwisler static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES]; 49ac401cc7SJan Kara 50ac401cc7SJan Kara static int __init init_dax_wait_table(void) 51ac401cc7SJan Kara { 52ac401cc7SJan Kara int i; 53ac401cc7SJan Kara 54ac401cc7SJan Kara for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++) 55ac401cc7SJan Kara init_waitqueue_head(wait_table + i); 56ac401cc7SJan Kara return 0; 57ac401cc7SJan Kara } 58ac401cc7SJan Kara fs_initcall(init_dax_wait_table); 59ac401cc7SJan Kara 60527b19d0SRoss Zwisler /* 61527b19d0SRoss Zwisler * We use lowest available bit in exceptional entry for locking, one bit for 62527b19d0SRoss Zwisler * the entry size (PMD) and two more to tell us if the entry is a zero page or 63527b19d0SRoss Zwisler * an empty entry that is just used for locking. In total four special bits. 
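 *
 * (Editorial illustration, not part of the original comment: a locked PMD
 * entry for a given 'sector' is built and taken apart roughly as
 *
 *	entry  = RADIX_TREE_EXCEPTIONAL_ENTRY | RADIX_DAX_PMD |
 *		 ((unsigned long)sector << RADIX_DAX_SHIFT) |
 *		 RADIX_DAX_ENTRY_LOCK;
 *	sector = (unsigned long)entry >> RADIX_DAX_SHIFT;
 *
 * which is exactly what dax_radix_locked_entry() and dax_radix_sector()
 * below do.)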
64527b19d0SRoss Zwisler * 65527b19d0SRoss Zwisler * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE 66527b19d0SRoss Zwisler * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem 67527b19d0SRoss Zwisler * block allocation. 68527b19d0SRoss Zwisler */ 69527b19d0SRoss Zwisler #define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4) 70527b19d0SRoss Zwisler #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) 71527b19d0SRoss Zwisler #define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1)) 72527b19d0SRoss Zwisler #define RADIX_DAX_ZERO_PAGE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2)) 73527b19d0SRoss Zwisler #define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)) 74527b19d0SRoss Zwisler 75527b19d0SRoss Zwisler static unsigned long dax_radix_sector(void *entry) 76527b19d0SRoss Zwisler { 77527b19d0SRoss Zwisler return (unsigned long)entry >> RADIX_DAX_SHIFT; 78527b19d0SRoss Zwisler } 79527b19d0SRoss Zwisler 80527b19d0SRoss Zwisler static void *dax_radix_locked_entry(sector_t sector, unsigned long flags) 81527b19d0SRoss Zwisler { 82527b19d0SRoss Zwisler return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags | 83527b19d0SRoss Zwisler ((unsigned long)sector << RADIX_DAX_SHIFT) | 84527b19d0SRoss Zwisler RADIX_DAX_ENTRY_LOCK); 85527b19d0SRoss Zwisler } 86527b19d0SRoss Zwisler 87527b19d0SRoss Zwisler static unsigned int dax_radix_order(void *entry) 88527b19d0SRoss Zwisler { 89527b19d0SRoss Zwisler if ((unsigned long)entry & RADIX_DAX_PMD) 90527b19d0SRoss Zwisler return PMD_SHIFT - PAGE_SHIFT; 91527b19d0SRoss Zwisler return 0; 92527b19d0SRoss Zwisler } 93527b19d0SRoss Zwisler 94642261acSRoss Zwisler static int dax_is_pmd_entry(void *entry) 95642261acSRoss Zwisler { 96642261acSRoss Zwisler return (unsigned long)entry & RADIX_DAX_PMD; 97642261acSRoss Zwisler } 98642261acSRoss Zwisler 99642261acSRoss Zwisler static int dax_is_pte_entry(void *entry) 100642261acSRoss Zwisler { 101642261acSRoss Zwisler return !((unsigned long)entry & RADIX_DAX_PMD); 102642261acSRoss Zwisler } 103642261acSRoss Zwisler 104642261acSRoss Zwisler static int dax_is_zero_entry(void *entry) 105642261acSRoss Zwisler { 10691d25ba8SRoss Zwisler return (unsigned long)entry & RADIX_DAX_ZERO_PAGE; 107642261acSRoss Zwisler } 108642261acSRoss Zwisler 109642261acSRoss Zwisler static int dax_is_empty_entry(void *entry) 110642261acSRoss Zwisler { 111642261acSRoss Zwisler return (unsigned long)entry & RADIX_DAX_EMPTY; 112642261acSRoss Zwisler } 113642261acSRoss Zwisler 114f7ca90b1SMatthew Wilcox /* 115ac401cc7SJan Kara * DAX radix tree locking 116ac401cc7SJan Kara */ 117ac401cc7SJan Kara struct exceptional_entry_key { 118ac401cc7SJan Kara struct address_space *mapping; 11963e95b5cSRoss Zwisler pgoff_t entry_start; 120ac401cc7SJan Kara }; 121ac401cc7SJan Kara 122ac401cc7SJan Kara struct wait_exceptional_entry_queue { 123ac6424b9SIngo Molnar wait_queue_entry_t wait; 124ac401cc7SJan Kara struct exceptional_entry_key key; 125ac401cc7SJan Kara }; 126ac401cc7SJan Kara 12763e95b5cSRoss Zwisler static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping, 12863e95b5cSRoss Zwisler pgoff_t index, void *entry, struct exceptional_entry_key *key) 12963e95b5cSRoss Zwisler { 13063e95b5cSRoss Zwisler unsigned long hash; 13163e95b5cSRoss Zwisler 13263e95b5cSRoss Zwisler /* 13363e95b5cSRoss Zwisler * If 'entry' is a PMD, align the 'index' that we use for the wait 13463e95b5cSRoss Zwisler * queue to the start of that PMD. 
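 *
 * (Editorial illustration, assuming 4k pages and 2MiB PMDs so that
 * PG_PMD_COLOUR is 511: the masking below,
 *
 *	index &= ~PG_PMD_COLOUR;
 *
 * rounds any of the 512 page offsets inside the PMD down to the first one,
 * so they all pick the same waitqueue bucket.)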
This ensures that all offsets in 13563e95b5cSRoss Zwisler * the range covered by the PMD map to the same bit lock. 13663e95b5cSRoss Zwisler */ 137642261acSRoss Zwisler if (dax_is_pmd_entry(entry)) 138917f3452SRoss Zwisler index &= ~PG_PMD_COLOUR; 13963e95b5cSRoss Zwisler 14063e95b5cSRoss Zwisler key->mapping = mapping; 14163e95b5cSRoss Zwisler key->entry_start = index; 14263e95b5cSRoss Zwisler 14363e95b5cSRoss Zwisler hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS); 14463e95b5cSRoss Zwisler return wait_table + hash; 14563e95b5cSRoss Zwisler } 14663e95b5cSRoss Zwisler 147ac6424b9SIngo Molnar static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode, 148ac401cc7SJan Kara int sync, void *keyp) 149ac401cc7SJan Kara { 150ac401cc7SJan Kara struct exceptional_entry_key *key = keyp; 151ac401cc7SJan Kara struct wait_exceptional_entry_queue *ewait = 152ac401cc7SJan Kara container_of(wait, struct wait_exceptional_entry_queue, wait); 153ac401cc7SJan Kara 154ac401cc7SJan Kara if (key->mapping != ewait->key.mapping || 15563e95b5cSRoss Zwisler key->entry_start != ewait->key.entry_start) 156ac401cc7SJan Kara return 0; 157ac401cc7SJan Kara return autoremove_wake_function(wait, mode, sync, NULL); 158ac401cc7SJan Kara } 159ac401cc7SJan Kara 160ac401cc7SJan Kara /* 161e30331ffSRoss Zwisler * We do not necessarily hold the mapping->tree_lock when we call this 162e30331ffSRoss Zwisler * function so it is possible that 'entry' is no longer a valid item in the 163e30331ffSRoss Zwisler * radix tree. This is okay because all we really need to do is to find the 164e30331ffSRoss Zwisler * correct waitqueue where tasks might be waiting for that old 'entry' and 165e30331ffSRoss Zwisler * wake them. 166e30331ffSRoss Zwisler */ 167d01ad197SRoss Zwisler static void dax_wake_mapping_entry_waiter(struct address_space *mapping, 168e30331ffSRoss Zwisler pgoff_t index, void *entry, bool wake_all) 169e30331ffSRoss Zwisler { 170e30331ffSRoss Zwisler struct exceptional_entry_key key; 171e30331ffSRoss Zwisler wait_queue_head_t *wq; 172e30331ffSRoss Zwisler 173e30331ffSRoss Zwisler wq = dax_entry_waitqueue(mapping, index, entry, &key); 174e30331ffSRoss Zwisler 175e30331ffSRoss Zwisler /* 176e30331ffSRoss Zwisler * Checking for locked entry and prepare_to_wait_exclusive() happens 177e30331ffSRoss Zwisler * under mapping->tree_lock, ditto for entry handling in our callers. 178e30331ffSRoss Zwisler * So at this point all tasks that could have seen our entry locked 179e30331ffSRoss Zwisler * must be in the waitqueue and the following check will see them. 180e30331ffSRoss Zwisler */ 181e30331ffSRoss Zwisler if (waitqueue_active(wq)) 182e30331ffSRoss Zwisler __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key); 183e30331ffSRoss Zwisler } 184e30331ffSRoss Zwisler 185e30331ffSRoss Zwisler /* 186ac401cc7SJan Kara * Check whether the given slot is locked. The function must be called with 187ac401cc7SJan Kara * mapping->tree_lock held 188ac401cc7SJan Kara */ 189ac401cc7SJan Kara static inline int slot_locked(struct address_space *mapping, void **slot) 190ac401cc7SJan Kara { 191ac401cc7SJan Kara unsigned long entry = (unsigned long) 192ac401cc7SJan Kara radix_tree_deref_slot_protected(slot, &mapping->tree_lock); 193ac401cc7SJan Kara return entry & RADIX_DAX_ENTRY_LOCK; 194ac401cc7SJan Kara } 195ac401cc7SJan Kara 196ac401cc7SJan Kara /* 197ac401cc7SJan Kara * Mark the given slot is locked. 
The function must be called with
198ac401cc7SJan Kara * mapping->tree_lock held
199ac401cc7SJan Kara */
200ac401cc7SJan Kara static inline void *lock_slot(struct address_space *mapping, void **slot)
201ac401cc7SJan Kara {
202ac401cc7SJan Kara unsigned long entry = (unsigned long)
203ac401cc7SJan Kara radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
204ac401cc7SJan Kara
205ac401cc7SJan Kara entry |= RADIX_DAX_ENTRY_LOCK;
2066d75f366SJohannes Weiner radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
207ac401cc7SJan Kara return (void *)entry;
208ac401cc7SJan Kara }
209ac401cc7SJan Kara
210ac401cc7SJan Kara /*
211ac401cc7SJan Kara * Mark the given slot as unlocked. The function must be called with
212ac401cc7SJan Kara * mapping->tree_lock held
213ac401cc7SJan Kara */
214ac401cc7SJan Kara static inline void *unlock_slot(struct address_space *mapping, void **slot)
215ac401cc7SJan Kara {
216ac401cc7SJan Kara unsigned long entry = (unsigned long)
217ac401cc7SJan Kara radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
218ac401cc7SJan Kara
219ac401cc7SJan Kara entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
2206d75f366SJohannes Weiner radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
221ac401cc7SJan Kara return (void *)entry;
222ac401cc7SJan Kara }
223ac401cc7SJan Kara
224ac401cc7SJan Kara /*
225ac401cc7SJan Kara * Lookup entry in radix tree, wait for it to become unlocked if it is
226ac401cc7SJan Kara * an exceptional entry and return it. The caller must call
227ac401cc7SJan Kara * put_unlocked_mapping_entry() when it decides not to lock the entry, or
228ac401cc7SJan Kara * put_locked_mapping_entry() when it has locked the entry and now wants to
229ac401cc7SJan Kara * unlock it.
230ac401cc7SJan Kara *
231ac401cc7SJan Kara * The function must be called with mapping->tree_lock held.
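 *
 * (Editorial sketch of the expected calling pattern, using only helpers from
 * this file; 'want_to_lock' stands in for the caller's own decision:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	if (entry && want_to_lock)
 *		entry = lock_slot(mapping, slot);
 *	else
 *		put_unlocked_mapping_entry(mapping, index, entry);
 *	spin_unlock_irq(&mapping->tree_lock);
 *
 * grab_mapping_entry() and dax_writeback_one() below follow this shape.)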
232ac401cc7SJan Kara */ 233ac401cc7SJan Kara static void *get_unlocked_mapping_entry(struct address_space *mapping, 234ac401cc7SJan Kara pgoff_t index, void ***slotp) 235ac401cc7SJan Kara { 236e3ad61c6SRoss Zwisler void *entry, **slot; 237ac401cc7SJan Kara struct wait_exceptional_entry_queue ewait; 23863e95b5cSRoss Zwisler wait_queue_head_t *wq; 239ac401cc7SJan Kara 240ac401cc7SJan Kara init_wait(&ewait.wait); 241ac401cc7SJan Kara ewait.wait.func = wake_exceptional_entry_func; 242ac401cc7SJan Kara 243ac401cc7SJan Kara for (;;) { 244e3ad61c6SRoss Zwisler entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, 245ac401cc7SJan Kara &slot); 24691d25ba8SRoss Zwisler if (!entry || 24791d25ba8SRoss Zwisler WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) || 248ac401cc7SJan Kara !slot_locked(mapping, slot)) { 249ac401cc7SJan Kara if (slotp) 250ac401cc7SJan Kara *slotp = slot; 251e3ad61c6SRoss Zwisler return entry; 252ac401cc7SJan Kara } 25363e95b5cSRoss Zwisler 25463e95b5cSRoss Zwisler wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key); 255ac401cc7SJan Kara prepare_to_wait_exclusive(wq, &ewait.wait, 256ac401cc7SJan Kara TASK_UNINTERRUPTIBLE); 257ac401cc7SJan Kara spin_unlock_irq(&mapping->tree_lock); 258ac401cc7SJan Kara schedule(); 259ac401cc7SJan Kara finish_wait(wq, &ewait.wait); 260ac401cc7SJan Kara spin_lock_irq(&mapping->tree_lock); 261ac401cc7SJan Kara } 262ac401cc7SJan Kara } 263ac401cc7SJan Kara 264b1aa812bSJan Kara static void dax_unlock_mapping_entry(struct address_space *mapping, 265b1aa812bSJan Kara pgoff_t index) 266b1aa812bSJan Kara { 267b1aa812bSJan Kara void *entry, **slot; 268b1aa812bSJan Kara 269b1aa812bSJan Kara spin_lock_irq(&mapping->tree_lock); 270b1aa812bSJan Kara entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot); 271b1aa812bSJan Kara if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) || 272b1aa812bSJan Kara !slot_locked(mapping, slot))) { 273b1aa812bSJan Kara spin_unlock_irq(&mapping->tree_lock); 274b1aa812bSJan Kara return; 275b1aa812bSJan Kara } 276b1aa812bSJan Kara unlock_slot(mapping, slot); 277b1aa812bSJan Kara spin_unlock_irq(&mapping->tree_lock); 278b1aa812bSJan Kara dax_wake_mapping_entry_waiter(mapping, index, entry, false); 279b1aa812bSJan Kara } 280b1aa812bSJan Kara 281ac401cc7SJan Kara static void put_locked_mapping_entry(struct address_space *mapping, 28291d25ba8SRoss Zwisler pgoff_t index) 283ac401cc7SJan Kara { 284bc2466e4SJan Kara dax_unlock_mapping_entry(mapping, index); 285ac401cc7SJan Kara } 286ac401cc7SJan Kara 287ac401cc7SJan Kara /* 288ac401cc7SJan Kara * Called when we are done with radix tree entry we looked up via 289ac401cc7SJan Kara * get_unlocked_mapping_entry() and which we didn't lock in the end. 290ac401cc7SJan Kara */ 291ac401cc7SJan Kara static void put_unlocked_mapping_entry(struct address_space *mapping, 292ac401cc7SJan Kara pgoff_t index, void *entry) 293ac401cc7SJan Kara { 29491d25ba8SRoss Zwisler if (!entry) 295ac401cc7SJan Kara return; 296ac401cc7SJan Kara 297ac401cc7SJan Kara /* We have to wake up next waiter for the radix tree entry lock */ 298422476c4SRoss Zwisler dax_wake_mapping_entry_waiter(mapping, index, entry, false); 299422476c4SRoss Zwisler } 300422476c4SRoss Zwisler 301ac401cc7SJan Kara /* 30291d25ba8SRoss Zwisler * Find radix tree entry at given index. If it points to an exceptional entry, 30391d25ba8SRoss Zwisler * return it with the radix tree entry locked. 
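 *
 * (Editorial note: callers treat the return value as either a locked entry,
 * later released with put_locked_mapping_entry(), or an ERR_PTR(), e.g.
 *
 *	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
 *	if (IS_ERR(entry))
 *		return dax_fault_return(PTR_ERR(entry));
 *
 * roughly as in dax_iomap_pte_fault() below.)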
If the radix tree doesn't 30491d25ba8SRoss Zwisler * contain given index, create an empty exceptional entry for the index and 30591d25ba8SRoss Zwisler * return with it locked. 306ac401cc7SJan Kara * 307642261acSRoss Zwisler * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will 308642261acSRoss Zwisler * either return that locked entry or will return an error. This error will 30991d25ba8SRoss Zwisler * happen if there are any 4k entries within the 2MiB range that we are 31091d25ba8SRoss Zwisler * requesting. 311642261acSRoss Zwisler * 312642261acSRoss Zwisler * We always favor 4k entries over 2MiB entries. There isn't a flow where we 313642261acSRoss Zwisler * evict 4k entries in order to 'upgrade' them to a 2MiB entry. A 2MiB 314642261acSRoss Zwisler * insertion will fail if it finds any 4k entries already in the tree, and a 315642261acSRoss Zwisler * 4k insertion will cause an existing 2MiB entry to be unmapped and 316642261acSRoss Zwisler * downgraded to 4k entries. This happens for both 2MiB huge zero pages as 317642261acSRoss Zwisler * well as 2MiB empty entries. 318642261acSRoss Zwisler * 319642261acSRoss Zwisler * The exception to this downgrade path is for 2MiB DAX PMD entries that have 320642261acSRoss Zwisler * real storage backing them. We will leave these real 2MiB DAX entries in 321642261acSRoss Zwisler * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry. 322642261acSRoss Zwisler * 323ac401cc7SJan Kara * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For 324ac401cc7SJan Kara * persistent memory the benefit is doubtful. We can add that later if we can 325ac401cc7SJan Kara * show it helps. 326ac401cc7SJan Kara */ 327642261acSRoss Zwisler static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index, 328642261acSRoss Zwisler unsigned long size_flag) 329ac401cc7SJan Kara { 330642261acSRoss Zwisler bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */ 331e3ad61c6SRoss Zwisler void *entry, **slot; 332ac401cc7SJan Kara 333ac401cc7SJan Kara restart: 334ac401cc7SJan Kara spin_lock_irq(&mapping->tree_lock); 335e3ad61c6SRoss Zwisler entry = get_unlocked_mapping_entry(mapping, index, &slot); 336642261acSRoss Zwisler 33791d25ba8SRoss Zwisler if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) { 33891d25ba8SRoss Zwisler entry = ERR_PTR(-EIO); 33991d25ba8SRoss Zwisler goto out_unlock; 34091d25ba8SRoss Zwisler } 34191d25ba8SRoss Zwisler 342642261acSRoss Zwisler if (entry) { 343642261acSRoss Zwisler if (size_flag & RADIX_DAX_PMD) { 34491d25ba8SRoss Zwisler if (dax_is_pte_entry(entry)) { 345642261acSRoss Zwisler put_unlocked_mapping_entry(mapping, index, 346642261acSRoss Zwisler entry); 347642261acSRoss Zwisler entry = ERR_PTR(-EEXIST); 348642261acSRoss Zwisler goto out_unlock; 349642261acSRoss Zwisler } 350642261acSRoss Zwisler } else { /* trying to grab a PTE entry */ 35191d25ba8SRoss Zwisler if (dax_is_pmd_entry(entry) && 352642261acSRoss Zwisler (dax_is_zero_entry(entry) || 353642261acSRoss Zwisler dax_is_empty_entry(entry))) { 354642261acSRoss Zwisler pmd_downgrade = true; 355642261acSRoss Zwisler } 356642261acSRoss Zwisler } 357642261acSRoss Zwisler } 358642261acSRoss Zwisler 359ac401cc7SJan Kara /* No entry for given index? Make sure radix tree is big enough. 
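 *
 * (Editorial note: the radix_tree_preload() below may sleep, which is why
 * the tree_lock is dropped around it; that in turn is why, once the lock is
 * re-taken, the code has to look the entry up again in case another thread
 * inserted one for this index in the meantime.)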
*/ 360642261acSRoss Zwisler if (!entry || pmd_downgrade) { 361ac401cc7SJan Kara int err; 362ac401cc7SJan Kara 363642261acSRoss Zwisler if (pmd_downgrade) { 364642261acSRoss Zwisler /* 365642261acSRoss Zwisler * Make sure 'entry' remains valid while we drop 366642261acSRoss Zwisler * mapping->tree_lock. 367642261acSRoss Zwisler */ 368642261acSRoss Zwisler entry = lock_slot(mapping, slot); 369642261acSRoss Zwisler } 370642261acSRoss Zwisler 371ac401cc7SJan Kara spin_unlock_irq(&mapping->tree_lock); 372642261acSRoss Zwisler /* 373642261acSRoss Zwisler * Besides huge zero pages the only other thing that gets 374642261acSRoss Zwisler * downgraded are empty entries which don't need to be 375642261acSRoss Zwisler * unmapped. 376642261acSRoss Zwisler */ 377642261acSRoss Zwisler if (pmd_downgrade && dax_is_zero_entry(entry)) 378642261acSRoss Zwisler unmap_mapping_range(mapping, 379642261acSRoss Zwisler (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0); 380642261acSRoss Zwisler 3810cb80b48SJan Kara err = radix_tree_preload( 3820cb80b48SJan Kara mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM); 3830cb80b48SJan Kara if (err) { 3840cb80b48SJan Kara if (pmd_downgrade) 38591d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, index); 3860cb80b48SJan Kara return ERR_PTR(err); 3870cb80b48SJan Kara } 388ac401cc7SJan Kara spin_lock_irq(&mapping->tree_lock); 389642261acSRoss Zwisler 390e11f8b7bSRoss Zwisler if (!entry) { 391e11f8b7bSRoss Zwisler /* 392e11f8b7bSRoss Zwisler * We needed to drop the page_tree lock while calling 393e11f8b7bSRoss Zwisler * radix_tree_preload() and we didn't have an entry to 394e11f8b7bSRoss Zwisler * lock. See if another thread inserted an entry at 395e11f8b7bSRoss Zwisler * our index during this time. 396e11f8b7bSRoss Zwisler */ 397e11f8b7bSRoss Zwisler entry = __radix_tree_lookup(&mapping->page_tree, index, 398e11f8b7bSRoss Zwisler NULL, &slot); 399e11f8b7bSRoss Zwisler if (entry) { 400e11f8b7bSRoss Zwisler radix_tree_preload_end(); 401e11f8b7bSRoss Zwisler spin_unlock_irq(&mapping->tree_lock); 402e11f8b7bSRoss Zwisler goto restart; 403e11f8b7bSRoss Zwisler } 404e11f8b7bSRoss Zwisler } 405e11f8b7bSRoss Zwisler 406642261acSRoss Zwisler if (pmd_downgrade) { 407642261acSRoss Zwisler radix_tree_delete(&mapping->page_tree, index); 408642261acSRoss Zwisler mapping->nrexceptional--; 409642261acSRoss Zwisler dax_wake_mapping_entry_waiter(mapping, index, entry, 410642261acSRoss Zwisler true); 411642261acSRoss Zwisler } 412642261acSRoss Zwisler 413642261acSRoss Zwisler entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY); 414642261acSRoss Zwisler 415642261acSRoss Zwisler err = __radix_tree_insert(&mapping->page_tree, index, 416642261acSRoss Zwisler dax_radix_order(entry), entry); 417ac401cc7SJan Kara radix_tree_preload_end(); 418ac401cc7SJan Kara if (err) { 419ac401cc7SJan Kara spin_unlock_irq(&mapping->tree_lock); 420642261acSRoss Zwisler /* 421e11f8b7bSRoss Zwisler * Our insertion of a DAX entry failed, most likely 422e11f8b7bSRoss Zwisler * because we were inserting a PMD entry and it 423e11f8b7bSRoss Zwisler * collided with a PTE sized entry at a different 424e11f8b7bSRoss Zwisler * index in the PMD range. We haven't inserted 425e11f8b7bSRoss Zwisler * anything into the radix tree and have no waiters to 426e11f8b7bSRoss Zwisler * wake. 427642261acSRoss Zwisler */ 428ac401cc7SJan Kara return ERR_PTR(err); 429ac401cc7SJan Kara } 430ac401cc7SJan Kara /* Good, we have inserted empty locked entry into the tree. 
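 *
 * (Editorial note: mapping->nrexceptional, bumped just below, is the counter
 * dax_writeback_mapping_range() checks before it bothers scanning the tree,
 * so it is updated together with the insert while tree_lock is still held.)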
*/ 431ac401cc7SJan Kara mapping->nrexceptional++; 432ac401cc7SJan Kara spin_unlock_irq(&mapping->tree_lock); 433e3ad61c6SRoss Zwisler return entry; 434ac401cc7SJan Kara } 435e3ad61c6SRoss Zwisler entry = lock_slot(mapping, slot); 436642261acSRoss Zwisler out_unlock: 437ac401cc7SJan Kara spin_unlock_irq(&mapping->tree_lock); 438e3ad61c6SRoss Zwisler return entry; 439ac401cc7SJan Kara } 440ac401cc7SJan Kara 441c6dcf52cSJan Kara static int __dax_invalidate_mapping_entry(struct address_space *mapping, 442c6dcf52cSJan Kara pgoff_t index, bool trunc) 443c6dcf52cSJan Kara { 444c6dcf52cSJan Kara int ret = 0; 445c6dcf52cSJan Kara void *entry; 446c6dcf52cSJan Kara struct radix_tree_root *page_tree = &mapping->page_tree; 447c6dcf52cSJan Kara 448c6dcf52cSJan Kara spin_lock_irq(&mapping->tree_lock); 449c6dcf52cSJan Kara entry = get_unlocked_mapping_entry(mapping, index, NULL); 45091d25ba8SRoss Zwisler if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry))) 451c6dcf52cSJan Kara goto out; 452c6dcf52cSJan Kara if (!trunc && 453c6dcf52cSJan Kara (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) || 454c6dcf52cSJan Kara radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))) 455c6dcf52cSJan Kara goto out; 456c6dcf52cSJan Kara radix_tree_delete(page_tree, index); 457c6dcf52cSJan Kara mapping->nrexceptional--; 458c6dcf52cSJan Kara ret = 1; 459c6dcf52cSJan Kara out: 460c6dcf52cSJan Kara put_unlocked_mapping_entry(mapping, index, entry); 461c6dcf52cSJan Kara spin_unlock_irq(&mapping->tree_lock); 462c6dcf52cSJan Kara return ret; 463c6dcf52cSJan Kara } 464ac401cc7SJan Kara /* 465ac401cc7SJan Kara * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree 466ac401cc7SJan Kara * entry to get unlocked before deleting it. 467ac401cc7SJan Kara */ 468ac401cc7SJan Kara int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) 469ac401cc7SJan Kara { 470c6dcf52cSJan Kara int ret = __dax_invalidate_mapping_entry(mapping, index, true); 471ac401cc7SJan Kara 472ac401cc7SJan Kara /* 473ac401cc7SJan Kara * This gets called from truncate / punch_hole path. As such, the caller 474ac401cc7SJan Kara * must hold locks protecting against concurrent modifications of the 475ac401cc7SJan Kara * radix tree (usually fs-private i_mmap_sem for writing). Since the 476ac401cc7SJan Kara * caller has seen exceptional entry for this index, we better find it 477ac401cc7SJan Kara * at that index as well... 478ac401cc7SJan Kara */ 479c6dcf52cSJan Kara WARN_ON_ONCE(!ret); 480c6dcf52cSJan Kara return ret; 481ac401cc7SJan Kara } 482ac401cc7SJan Kara 483c6dcf52cSJan Kara /* 484c6dcf52cSJan Kara * Invalidate exceptional DAX entry if it is clean. 
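 *
 * (Editorial note: "clean" means the entry carries neither
 * PAGECACHE_TAG_DIRTY nor PAGECACHE_TAG_TOWRITE; those are the two tags
 * __dax_invalidate_mapping_entry() above checks when 'trunc' is false, so a
 * dirty entry is left in place for writeback to flush first.)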
485c6dcf52cSJan Kara */ 486c6dcf52cSJan Kara int dax_invalidate_mapping_entry_sync(struct address_space *mapping, 487c6dcf52cSJan Kara pgoff_t index) 488c6dcf52cSJan Kara { 489c6dcf52cSJan Kara return __dax_invalidate_mapping_entry(mapping, index, false); 490ac401cc7SJan Kara } 491ac401cc7SJan Kara 492cccbce67SDan Williams static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev, 493cccbce67SDan Williams sector_t sector, size_t size, struct page *to, 494cccbce67SDan Williams unsigned long vaddr) 495f7ca90b1SMatthew Wilcox { 496cccbce67SDan Williams void *vto, *kaddr; 497cccbce67SDan Williams pgoff_t pgoff; 498cccbce67SDan Williams pfn_t pfn; 499cccbce67SDan Williams long rc; 500cccbce67SDan Williams int id; 501e2e05394SRoss Zwisler 502cccbce67SDan Williams rc = bdev_dax_pgoff(bdev, sector, size, &pgoff); 503cccbce67SDan Williams if (rc) 504cccbce67SDan Williams return rc; 505cccbce67SDan Williams 506cccbce67SDan Williams id = dax_read_lock(); 507cccbce67SDan Williams rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn); 508cccbce67SDan Williams if (rc < 0) { 509cccbce67SDan Williams dax_read_unlock(id); 510cccbce67SDan Williams return rc; 511cccbce67SDan Williams } 512f7ca90b1SMatthew Wilcox vto = kmap_atomic(to); 513cccbce67SDan Williams copy_user_page(vto, (void __force *)kaddr, vaddr, to); 514f7ca90b1SMatthew Wilcox kunmap_atomic(vto); 515cccbce67SDan Williams dax_read_unlock(id); 516f7ca90b1SMatthew Wilcox return 0; 517f7ca90b1SMatthew Wilcox } 518f7ca90b1SMatthew Wilcox 519642261acSRoss Zwisler /* 520642261acSRoss Zwisler * By this point grab_mapping_entry() has ensured that we have a locked entry 521642261acSRoss Zwisler * of the appropriate size so we don't have to worry about downgrading PMDs to 522642261acSRoss Zwisler * PTEs. If we happen to be trying to insert a PTE and there is a PMD 523642261acSRoss Zwisler * already in the tree, we will skip the insertion and just dirty the PMD as 524642261acSRoss Zwisler * appropriate. 
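 *
 * (Editorial sketch: this is how dax_iomap_pte_fault() below calls it, with
 * the sector taken from the iomap the filesystem returned:
 *
 *	entry = dax_insert_mapping_entry(mapping, vmf, entry,
 *					 dax_iomap_sector(&iomap, pos),
 *					 0, write && !sync);
 *	if (IS_ERR(entry)) {
 *		error = PTR_ERR(entry);
 *		goto error_finish_iomap;
 *	}
 *
 * i.e. the entry is only tagged dirty for a write fault that is not a
 * synchronous (MAP_SYNC) fault.)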
525642261acSRoss Zwisler */ 526ac401cc7SJan Kara static void *dax_insert_mapping_entry(struct address_space *mapping, 527ac401cc7SJan Kara struct vm_fault *vmf, 528642261acSRoss Zwisler void *entry, sector_t sector, 529f5b7b748SJan Kara unsigned long flags, bool dirty) 5309973c98eSRoss Zwisler { 5319973c98eSRoss Zwisler struct radix_tree_root *page_tree = &mapping->page_tree; 532ac401cc7SJan Kara void *new_entry; 533ac401cc7SJan Kara pgoff_t index = vmf->pgoff; 5349973c98eSRoss Zwisler 535f5b7b748SJan Kara if (dirty) 5369973c98eSRoss Zwisler __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 5379973c98eSRoss Zwisler 53891d25ba8SRoss Zwisler if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) { 53991d25ba8SRoss Zwisler /* we are replacing a zero page with block mapping */ 54091d25ba8SRoss Zwisler if (dax_is_pmd_entry(entry)) 54191d25ba8SRoss Zwisler unmap_mapping_range(mapping, 54291d25ba8SRoss Zwisler (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, 54391d25ba8SRoss Zwisler PMD_SIZE, 0); 54491d25ba8SRoss Zwisler else /* pte entry */ 545ac401cc7SJan Kara unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT, 546ac401cc7SJan Kara PAGE_SIZE, 0); 547ac401cc7SJan Kara } 5489973c98eSRoss Zwisler 549ac401cc7SJan Kara spin_lock_irq(&mapping->tree_lock); 550642261acSRoss Zwisler new_entry = dax_radix_locked_entry(sector, flags); 551642261acSRoss Zwisler 55291d25ba8SRoss Zwisler if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { 553642261acSRoss Zwisler /* 554642261acSRoss Zwisler * Only swap our new entry into the radix tree if the current 555642261acSRoss Zwisler * entry is a zero page or an empty entry. If a normal PTE or 556642261acSRoss Zwisler * PMD entry is already in the tree, we leave it alone. This 557642261acSRoss Zwisler * means that if we are trying to insert a PTE and the 558642261acSRoss Zwisler * existing entry is a PMD, we will just leave the PMD in the 559642261acSRoss Zwisler * tree and dirty it if necessary. 
560642261acSRoss Zwisler */ 561f7942430SJohannes Weiner struct radix_tree_node *node; 562ac401cc7SJan Kara void **slot; 563ac401cc7SJan Kara void *ret; 564ac401cc7SJan Kara 565f7942430SJohannes Weiner ret = __radix_tree_lookup(page_tree, index, &node, &slot); 566ac401cc7SJan Kara WARN_ON_ONCE(ret != entry); 5674d693d08SJohannes Weiner __radix_tree_replace(page_tree, node, slot, 5684d693d08SJohannes Weiner new_entry, NULL, NULL); 56991d25ba8SRoss Zwisler entry = new_entry; 570ac401cc7SJan Kara } 57191d25ba8SRoss Zwisler 572f5b7b748SJan Kara if (dirty) 5739973c98eSRoss Zwisler radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY); 57491d25ba8SRoss Zwisler 5759973c98eSRoss Zwisler spin_unlock_irq(&mapping->tree_lock); 57691d25ba8SRoss Zwisler return entry; 5779973c98eSRoss Zwisler } 5789973c98eSRoss Zwisler 5794b4bb46dSJan Kara static inline unsigned long 5804b4bb46dSJan Kara pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma) 5814b4bb46dSJan Kara { 5824b4bb46dSJan Kara unsigned long address; 5834b4bb46dSJan Kara 5844b4bb46dSJan Kara address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 5854b4bb46dSJan Kara VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); 5864b4bb46dSJan Kara return address; 5874b4bb46dSJan Kara } 5884b4bb46dSJan Kara 5894b4bb46dSJan Kara /* Walk all mappings of a given index of a file and writeprotect them */ 5904b4bb46dSJan Kara static void dax_mapping_entry_mkclean(struct address_space *mapping, 5914b4bb46dSJan Kara pgoff_t index, unsigned long pfn) 5924b4bb46dSJan Kara { 5934b4bb46dSJan Kara struct vm_area_struct *vma; 594f729c8c9SRoss Zwisler pte_t pte, *ptep = NULL; 595f729c8c9SRoss Zwisler pmd_t *pmdp = NULL; 5964b4bb46dSJan Kara spinlock_t *ptl; 5974b4bb46dSJan Kara 5984b4bb46dSJan Kara i_mmap_lock_read(mapping); 5994b4bb46dSJan Kara vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { 600a4d1a885SJérôme Glisse unsigned long address, start, end; 6014b4bb46dSJan Kara 6024b4bb46dSJan Kara cond_resched(); 6034b4bb46dSJan Kara 6044b4bb46dSJan Kara if (!(vma->vm_flags & VM_SHARED)) 6054b4bb46dSJan Kara continue; 6064b4bb46dSJan Kara 6074b4bb46dSJan Kara address = pgoff_address(index, vma); 608a4d1a885SJérôme Glisse 609a4d1a885SJérôme Glisse /* 610a4d1a885SJérôme Glisse * Note because we provide start/end to follow_pte_pmd it will 611a4d1a885SJérôme Glisse * call mmu_notifier_invalidate_range_start() on our behalf 612a4d1a885SJérôme Glisse * before taking any lock. 
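 *
 * (Editorial note: on success follow_pte_pmd() returns with either 'ptep'
 * or 'pmdp' set and with the corresponding page-table lock held in 'ptl',
 * which is why both branches below finish with an explicit unlock.)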
613a4d1a885SJérôme Glisse */ 614a4d1a885SJérôme Glisse if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl)) 6154b4bb46dSJan Kara continue; 616f729c8c9SRoss Zwisler 617f729c8c9SRoss Zwisler if (pmdp) { 618f729c8c9SRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 619f729c8c9SRoss Zwisler pmd_t pmd; 620f729c8c9SRoss Zwisler 621f729c8c9SRoss Zwisler if (pfn != pmd_pfn(*pmdp)) 622f729c8c9SRoss Zwisler goto unlock_pmd; 623f729c8c9SRoss Zwisler if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp)) 624f729c8c9SRoss Zwisler goto unlock_pmd; 625f729c8c9SRoss Zwisler 626f729c8c9SRoss Zwisler flush_cache_page(vma, address, pfn); 627f729c8c9SRoss Zwisler pmd = pmdp_huge_clear_flush(vma, address, pmdp); 628f729c8c9SRoss Zwisler pmd = pmd_wrprotect(pmd); 629f729c8c9SRoss Zwisler pmd = pmd_mkclean(pmd); 630f729c8c9SRoss Zwisler set_pmd_at(vma->vm_mm, address, pmdp, pmd); 631a4d1a885SJérôme Glisse mmu_notifier_invalidate_range(vma->vm_mm, start, end); 632f729c8c9SRoss Zwisler unlock_pmd: 633f729c8c9SRoss Zwisler spin_unlock(ptl); 634f729c8c9SRoss Zwisler #endif 635f729c8c9SRoss Zwisler } else { 6364b4bb46dSJan Kara if (pfn != pte_pfn(*ptep)) 637f729c8c9SRoss Zwisler goto unlock_pte; 6384b4bb46dSJan Kara if (!pte_dirty(*ptep) && !pte_write(*ptep)) 639f729c8c9SRoss Zwisler goto unlock_pte; 6404b4bb46dSJan Kara 6414b4bb46dSJan Kara flush_cache_page(vma, address, pfn); 6424b4bb46dSJan Kara pte = ptep_clear_flush(vma, address, ptep); 6434b4bb46dSJan Kara pte = pte_wrprotect(pte); 6444b4bb46dSJan Kara pte = pte_mkclean(pte); 6454b4bb46dSJan Kara set_pte_at(vma->vm_mm, address, ptep, pte); 646a4d1a885SJérôme Glisse mmu_notifier_invalidate_range(vma->vm_mm, start, end); 647f729c8c9SRoss Zwisler unlock_pte: 6484b4bb46dSJan Kara pte_unmap_unlock(ptep, ptl); 649f729c8c9SRoss Zwisler } 6504b4bb46dSJan Kara 651a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); 6524b4bb46dSJan Kara } 6534b4bb46dSJan Kara i_mmap_unlock_read(mapping); 6544b4bb46dSJan Kara } 6554b4bb46dSJan Kara 6569973c98eSRoss Zwisler static int dax_writeback_one(struct block_device *bdev, 657cccbce67SDan Williams struct dax_device *dax_dev, struct address_space *mapping, 658cccbce67SDan Williams pgoff_t index, void *entry) 6599973c98eSRoss Zwisler { 6609973c98eSRoss Zwisler struct radix_tree_root *page_tree = &mapping->page_tree; 661cccbce67SDan Williams void *entry2, **slot, *kaddr; 662cccbce67SDan Williams long ret = 0, id; 663cccbce67SDan Williams sector_t sector; 664cccbce67SDan Williams pgoff_t pgoff; 665cccbce67SDan Williams size_t size; 666cccbce67SDan Williams pfn_t pfn; 6679973c98eSRoss Zwisler 6689973c98eSRoss Zwisler /* 669a6abc2c0SJan Kara * A page got tagged dirty in DAX mapping? Something is seriously 670a6abc2c0SJan Kara * wrong. 6719973c98eSRoss Zwisler */ 672a6abc2c0SJan Kara if (WARN_ON(!radix_tree_exceptional_entry(entry))) 673a6abc2c0SJan Kara return -EIO; 6749973c98eSRoss Zwisler 675a6abc2c0SJan Kara spin_lock_irq(&mapping->tree_lock); 676a6abc2c0SJan Kara entry2 = get_unlocked_mapping_entry(mapping, index, &slot); 677a6abc2c0SJan Kara /* Entry got punched out / reallocated? */ 67891d25ba8SRoss Zwisler if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2))) 679a6abc2c0SJan Kara goto put_unlocked; 680a6abc2c0SJan Kara /* 681a6abc2c0SJan Kara * Entry got reallocated elsewhere? No need to writeback. We have to 682a6abc2c0SJan Kara * compare sectors as we must not bail out due to difference in lockbit 683a6abc2c0SJan Kara * or entry type. 
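 *
 * (Editorial note: in other words, 'entry' and 'entry2' may legitimately
 * differ in the RADIX_DAX_ENTRY_LOCK bit or in PTE-vs-PMD size while still
 * naming the same block; only a different sector means the block we were
 * asked to flush has been freed or reallocated.)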
684a6abc2c0SJan Kara */ 685a6abc2c0SJan Kara if (dax_radix_sector(entry2) != dax_radix_sector(entry)) 686a6abc2c0SJan Kara goto put_unlocked; 687642261acSRoss Zwisler if (WARN_ON_ONCE(dax_is_empty_entry(entry) || 688642261acSRoss Zwisler dax_is_zero_entry(entry))) { 6899973c98eSRoss Zwisler ret = -EIO; 690a6abc2c0SJan Kara goto put_unlocked; 6919973c98eSRoss Zwisler } 6929973c98eSRoss Zwisler 693a6abc2c0SJan Kara /* Another fsync thread may have already written back this entry */ 694a6abc2c0SJan Kara if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)) 695a6abc2c0SJan Kara goto put_unlocked; 696a6abc2c0SJan Kara /* Lock the entry to serialize with page faults */ 697a6abc2c0SJan Kara entry = lock_slot(mapping, slot); 698a6abc2c0SJan Kara /* 699a6abc2c0SJan Kara * We can clear the tag now but we have to be careful so that concurrent 700a6abc2c0SJan Kara * dax_writeback_one() calls for the same index cannot finish before we 701a6abc2c0SJan Kara * actually flush the caches. This is achieved as the calls will look 702a6abc2c0SJan Kara * at the entry only under tree_lock and once they do that they will 703a6abc2c0SJan Kara * see the entry locked and wait for it to unlock. 704a6abc2c0SJan Kara */ 705a6abc2c0SJan Kara radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE); 706a6abc2c0SJan Kara spin_unlock_irq(&mapping->tree_lock); 707a6abc2c0SJan Kara 708642261acSRoss Zwisler /* 709642261acSRoss Zwisler * Even if dax_writeback_mapping_range() was given a wbc->range_start 710642261acSRoss Zwisler * in the middle of a PMD, the 'index' we are given will be aligned to 711642261acSRoss Zwisler * the start index of the PMD, as will the sector we pull from 712642261acSRoss Zwisler * 'entry'. This allows us to flush for PMD_SIZE and not have to 713642261acSRoss Zwisler * worry about partial PMD writebacks. 714642261acSRoss Zwisler */ 715cccbce67SDan Williams sector = dax_radix_sector(entry); 716cccbce67SDan Williams size = PAGE_SIZE << dax_radix_order(entry); 717cccbce67SDan Williams 718cccbce67SDan Williams id = dax_read_lock(); 719cccbce67SDan Williams ret = bdev_dax_pgoff(bdev, sector, size, &pgoff); 720cccbce67SDan Williams if (ret) 721cccbce67SDan Williams goto dax_unlock; 7229973c98eSRoss Zwisler 7239973c98eSRoss Zwisler /* 724cccbce67SDan Williams * dax_direct_access() may sleep, so cannot hold tree_lock over 725cccbce67SDan Williams * its invocation. 7269973c98eSRoss Zwisler */ 727cccbce67SDan Williams ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn); 728cccbce67SDan Williams if (ret < 0) 729cccbce67SDan Williams goto dax_unlock; 7309973c98eSRoss Zwisler 731cccbce67SDan Williams if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) { 7329973c98eSRoss Zwisler ret = -EIO; 733cccbce67SDan Williams goto dax_unlock; 7349973c98eSRoss Zwisler } 7359973c98eSRoss Zwisler 736cccbce67SDan Williams dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn)); 737c3ca015fSMikulas Patocka dax_flush(dax_dev, kaddr, size); 7384b4bb46dSJan Kara /* 7394b4bb46dSJan Kara * After we have flushed the cache, we can clear the dirty tag. There 7404b4bb46dSJan Kara * cannot be new dirty data in the pfn after the flush has completed as 7414b4bb46dSJan Kara * the pfn mappings are writeprotected and fault waits for mapping 7424b4bb46dSJan Kara * entry lock. 
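 *
 * (Editorial note on the ordering above: dax_mapping_entry_mkclean()
 * write-protects every user mapping of the pfn, dax_flush() then pushes the
 * data out of the CPU caches, and only after that is PAGECACHE_TAG_DIRTY
 * cleared below, so a racing write has to take a new write fault and will
 * re-dirty the entry.)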
7434b4bb46dSJan Kara */ 7444b4bb46dSJan Kara spin_lock_irq(&mapping->tree_lock); 7454b4bb46dSJan Kara radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY); 7464b4bb46dSJan Kara spin_unlock_irq(&mapping->tree_lock); 747f9bc3a07SRoss Zwisler trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT); 748cccbce67SDan Williams dax_unlock: 749cccbce67SDan Williams dax_read_unlock(id); 75091d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, index); 7519973c98eSRoss Zwisler return ret; 7529973c98eSRoss Zwisler 753a6abc2c0SJan Kara put_unlocked: 754a6abc2c0SJan Kara put_unlocked_mapping_entry(mapping, index, entry2); 7559973c98eSRoss Zwisler spin_unlock_irq(&mapping->tree_lock); 7569973c98eSRoss Zwisler return ret; 7579973c98eSRoss Zwisler } 7589973c98eSRoss Zwisler 7599973c98eSRoss Zwisler /* 7609973c98eSRoss Zwisler * Flush the mapping to the persistent domain within the byte range of [start, 7619973c98eSRoss Zwisler * end]. This is required by data integrity operations to ensure file data is 7629973c98eSRoss Zwisler * on persistent storage prior to completion of the operation. 7639973c98eSRoss Zwisler */ 7647f6d5b52SRoss Zwisler int dax_writeback_mapping_range(struct address_space *mapping, 7657f6d5b52SRoss Zwisler struct block_device *bdev, struct writeback_control *wbc) 7669973c98eSRoss Zwisler { 7679973c98eSRoss Zwisler struct inode *inode = mapping->host; 768642261acSRoss Zwisler pgoff_t start_index, end_index; 7699973c98eSRoss Zwisler pgoff_t indices[PAGEVEC_SIZE]; 770cccbce67SDan Williams struct dax_device *dax_dev; 7719973c98eSRoss Zwisler struct pagevec pvec; 7729973c98eSRoss Zwisler bool done = false; 7739973c98eSRoss Zwisler int i, ret = 0; 7749973c98eSRoss Zwisler 7759973c98eSRoss Zwisler if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) 7769973c98eSRoss Zwisler return -EIO; 7779973c98eSRoss Zwisler 7787f6d5b52SRoss Zwisler if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) 7797f6d5b52SRoss Zwisler return 0; 7807f6d5b52SRoss Zwisler 781cccbce67SDan Williams dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 782cccbce67SDan Williams if (!dax_dev) 783cccbce67SDan Williams return -EIO; 784cccbce67SDan Williams 78509cbfeafSKirill A. Shutemov start_index = wbc->range_start >> PAGE_SHIFT; 78609cbfeafSKirill A. 
Shutemov end_index = wbc->range_end >> PAGE_SHIFT; 7879973c98eSRoss Zwisler 788d14a3f48SRoss Zwisler trace_dax_writeback_range(inode, start_index, end_index); 789d14a3f48SRoss Zwisler 7909973c98eSRoss Zwisler tag_pages_for_writeback(mapping, start_index, end_index); 7919973c98eSRoss Zwisler 7929973c98eSRoss Zwisler pagevec_init(&pvec, 0); 7939973c98eSRoss Zwisler while (!done) { 7949973c98eSRoss Zwisler pvec.nr = find_get_entries_tag(mapping, start_index, 7959973c98eSRoss Zwisler PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE, 7969973c98eSRoss Zwisler pvec.pages, indices); 7979973c98eSRoss Zwisler 7989973c98eSRoss Zwisler if (pvec.nr == 0) 7999973c98eSRoss Zwisler break; 8009973c98eSRoss Zwisler 8019973c98eSRoss Zwisler for (i = 0; i < pvec.nr; i++) { 8029973c98eSRoss Zwisler if (indices[i] > end_index) { 8039973c98eSRoss Zwisler done = true; 8049973c98eSRoss Zwisler break; 8059973c98eSRoss Zwisler } 8069973c98eSRoss Zwisler 807cccbce67SDan Williams ret = dax_writeback_one(bdev, dax_dev, mapping, 808cccbce67SDan Williams indices[i], pvec.pages[i]); 809819ec6b9SJeff Layton if (ret < 0) { 810819ec6b9SJeff Layton mapping_set_error(mapping, ret); 811d14a3f48SRoss Zwisler goto out; 812d14a3f48SRoss Zwisler } 813d14a3f48SRoss Zwisler } 8141eb643d0SJan Kara start_index = indices[pvec.nr - 1] + 1; 815d14a3f48SRoss Zwisler } 816d14a3f48SRoss Zwisler out: 817cccbce67SDan Williams put_dax(dax_dev); 818d14a3f48SRoss Zwisler trace_dax_writeback_range_done(inode, start_index, end_index); 819d14a3f48SRoss Zwisler return (ret < 0 ? ret : 0); 8209973c98eSRoss Zwisler } 8219973c98eSRoss Zwisler EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); 8229973c98eSRoss Zwisler 82331a6f1a6SJan Kara static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) 824f7ca90b1SMatthew Wilcox { 82531a6f1a6SJan Kara return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9); 82631a6f1a6SJan Kara } 82731a6f1a6SJan Kara 8285e161e40SJan Kara static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size, 8295e161e40SJan Kara pfn_t *pfnp) 8305e161e40SJan Kara { 8315e161e40SJan Kara const sector_t sector = dax_iomap_sector(iomap, pos); 8325e161e40SJan Kara pgoff_t pgoff; 8335e161e40SJan Kara void *kaddr; 8345e161e40SJan Kara int id, rc; 8355e161e40SJan Kara long length; 8365e161e40SJan Kara 8375e161e40SJan Kara rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff); 8385e161e40SJan Kara if (rc) 8395e161e40SJan Kara return rc; 8405e161e40SJan Kara id = dax_read_lock(); 8415e161e40SJan Kara length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), 8425e161e40SJan Kara &kaddr, pfnp); 8435e161e40SJan Kara if (length < 0) { 8445e161e40SJan Kara rc = length; 8455e161e40SJan Kara goto out; 8465e161e40SJan Kara } 8475e161e40SJan Kara rc = -EINVAL; 8485e161e40SJan Kara if (PFN_PHYS(length) < size) 8495e161e40SJan Kara goto out; 8505e161e40SJan Kara if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1)) 8515e161e40SJan Kara goto out; 8525e161e40SJan Kara /* For larger pages we need devmap */ 8535e161e40SJan Kara if (length > 1 && !pfn_t_devmap(*pfnp)) 8545e161e40SJan Kara goto out; 8555e161e40SJan Kara rc = 0; 8565e161e40SJan Kara out: 8575e161e40SJan Kara dax_read_unlock(id); 8585e161e40SJan Kara return rc; 8595e161e40SJan Kara } 8605e161e40SJan Kara 8612f89dc12SJan Kara /* 86291d25ba8SRoss Zwisler * The user has performed a load from a hole in the file. Allocating a new 86391d25ba8SRoss Zwisler * page in the file would cause excessive storage usage for workloads with 86491d25ba8SRoss Zwisler * sparse files. 
Instead we insert a read-only mapping of the 4k zero page. 86591d25ba8SRoss Zwisler * If this page is ever written to we will re-fault and change the mapping to 86691d25ba8SRoss Zwisler * point to real DAX storage instead. 8672f89dc12SJan Kara */ 86891d25ba8SRoss Zwisler static int dax_load_hole(struct address_space *mapping, void *entry, 869e30331ffSRoss Zwisler struct vm_fault *vmf) 870e30331ffSRoss Zwisler { 871e30331ffSRoss Zwisler struct inode *inode = mapping->host; 87291d25ba8SRoss Zwisler unsigned long vaddr = vmf->address; 87391d25ba8SRoss Zwisler int ret = VM_FAULT_NOPAGE; 87491d25ba8SRoss Zwisler struct page *zero_page; 87591d25ba8SRoss Zwisler void *entry2; 876e30331ffSRoss Zwisler 87791d25ba8SRoss Zwisler zero_page = ZERO_PAGE(0); 87891d25ba8SRoss Zwisler if (unlikely(!zero_page)) { 879e30331ffSRoss Zwisler ret = VM_FAULT_OOM; 880e30331ffSRoss Zwisler goto out; 881e30331ffSRoss Zwisler } 882e30331ffSRoss Zwisler 88391d25ba8SRoss Zwisler entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0, 884f5b7b748SJan Kara RADIX_DAX_ZERO_PAGE, false); 88591d25ba8SRoss Zwisler if (IS_ERR(entry2)) { 88691d25ba8SRoss Zwisler ret = VM_FAULT_SIGBUS; 88791d25ba8SRoss Zwisler goto out; 888e30331ffSRoss Zwisler } 88991d25ba8SRoss Zwisler 89091d25ba8SRoss Zwisler vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page)); 891e30331ffSRoss Zwisler out: 892e30331ffSRoss Zwisler trace_dax_load_hole(inode, vmf, ret); 893e30331ffSRoss Zwisler return ret; 894e30331ffSRoss Zwisler } 895e30331ffSRoss Zwisler 8964b0228faSVishal Verma static bool dax_range_is_aligned(struct block_device *bdev, 8974b0228faSVishal Verma unsigned int offset, unsigned int length) 8984b0228faSVishal Verma { 8994b0228faSVishal Verma unsigned short sector_size = bdev_logical_block_size(bdev); 9004b0228faSVishal Verma 9014b0228faSVishal Verma if (!IS_ALIGNED(offset, sector_size)) 9024b0228faSVishal Verma return false; 9034b0228faSVishal Verma if (!IS_ALIGNED(length, sector_size)) 9044b0228faSVishal Verma return false; 9054b0228faSVishal Verma 9064b0228faSVishal Verma return true; 9074b0228faSVishal Verma } 9084b0228faSVishal Verma 909cccbce67SDan Williams int __dax_zero_page_range(struct block_device *bdev, 910cccbce67SDan Williams struct dax_device *dax_dev, sector_t sector, 911cccbce67SDan Williams unsigned int offset, unsigned int size) 912679c8bd3SChristoph Hellwig { 913cccbce67SDan Williams if (dax_range_is_aligned(bdev, offset, size)) { 914cccbce67SDan Williams sector_t start_sector = sector + (offset >> 9); 9154b0228faSVishal Verma 9164b0228faSVishal Verma return blkdev_issue_zeroout(bdev, start_sector, 91753ef7d0eSLinus Torvalds size >> 9, GFP_NOFS, 0); 9184b0228faSVishal Verma } else { 919cccbce67SDan Williams pgoff_t pgoff; 920cccbce67SDan Williams long rc, id; 921cccbce67SDan Williams void *kaddr; 922cccbce67SDan Williams pfn_t pfn; 923cccbce67SDan Williams 924e84b83b9SDan Williams rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff); 925cccbce67SDan Williams if (rc) 926cccbce67SDan Williams return rc; 927cccbce67SDan Williams 928cccbce67SDan Williams id = dax_read_lock(); 929e84b83b9SDan Williams rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, 930cccbce67SDan Williams &pfn); 931cccbce67SDan Williams if (rc < 0) { 932cccbce67SDan Williams dax_read_unlock(id); 933cccbce67SDan Williams return rc; 934cccbce67SDan Williams } 93581f55870SDan Williams memset(kaddr + offset, 0, size); 936c3ca015fSMikulas Patocka dax_flush(dax_dev, kaddr + offset, size); 937cccbce67SDan Williams dax_read_unlock(id); 9384b0228faSVishal Verma 
} 939679c8bd3SChristoph Hellwig return 0; 940679c8bd3SChristoph Hellwig } 941679c8bd3SChristoph Hellwig EXPORT_SYMBOL_GPL(__dax_zero_page_range); 942679c8bd3SChristoph Hellwig 943a254e568SChristoph Hellwig static loff_t 94411c59c92SRoss Zwisler dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, 945a254e568SChristoph Hellwig struct iomap *iomap) 946a254e568SChristoph Hellwig { 947cccbce67SDan Williams struct block_device *bdev = iomap->bdev; 948cccbce67SDan Williams struct dax_device *dax_dev = iomap->dax_dev; 949a254e568SChristoph Hellwig struct iov_iter *iter = data; 950a254e568SChristoph Hellwig loff_t end = pos + length, done = 0; 951a254e568SChristoph Hellwig ssize_t ret = 0; 952cccbce67SDan Williams int id; 953a254e568SChristoph Hellwig 954a254e568SChristoph Hellwig if (iov_iter_rw(iter) == READ) { 955a254e568SChristoph Hellwig end = min(end, i_size_read(inode)); 956a254e568SChristoph Hellwig if (pos >= end) 957a254e568SChristoph Hellwig return 0; 958a254e568SChristoph Hellwig 959a254e568SChristoph Hellwig if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) 960a254e568SChristoph Hellwig return iov_iter_zero(min(length, end - pos), iter); 961a254e568SChristoph Hellwig } 962a254e568SChristoph Hellwig 963a254e568SChristoph Hellwig if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) 964a254e568SChristoph Hellwig return -EIO; 965a254e568SChristoph Hellwig 966e3fce68cSJan Kara /* 967e3fce68cSJan Kara * Write can allocate block for an area which has a hole page mapped 968e3fce68cSJan Kara * into page tables. We have to tear down these mappings so that data 969e3fce68cSJan Kara * written by write(2) is visible in mmap. 970e3fce68cSJan Kara */ 971cd656375SJan Kara if (iomap->flags & IOMAP_F_NEW) { 972e3fce68cSJan Kara invalidate_inode_pages2_range(inode->i_mapping, 973e3fce68cSJan Kara pos >> PAGE_SHIFT, 974e3fce68cSJan Kara (end - 1) >> PAGE_SHIFT); 975e3fce68cSJan Kara } 976e3fce68cSJan Kara 977cccbce67SDan Williams id = dax_read_lock(); 978a254e568SChristoph Hellwig while (pos < end) { 979a254e568SChristoph Hellwig unsigned offset = pos & (PAGE_SIZE - 1); 980cccbce67SDan Williams const size_t size = ALIGN(length + offset, PAGE_SIZE); 981cccbce67SDan Williams const sector_t sector = dax_iomap_sector(iomap, pos); 982a254e568SChristoph Hellwig ssize_t map_len; 983cccbce67SDan Williams pgoff_t pgoff; 984cccbce67SDan Williams void *kaddr; 985cccbce67SDan Williams pfn_t pfn; 986a254e568SChristoph Hellwig 987d1908f52SMichal Hocko if (fatal_signal_pending(current)) { 988d1908f52SMichal Hocko ret = -EINTR; 989d1908f52SMichal Hocko break; 990d1908f52SMichal Hocko } 991d1908f52SMichal Hocko 992cccbce67SDan Williams ret = bdev_dax_pgoff(bdev, sector, size, &pgoff); 993cccbce67SDan Williams if (ret) 994cccbce67SDan Williams break; 995cccbce67SDan Williams 996cccbce67SDan Williams map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), 997cccbce67SDan Williams &kaddr, &pfn); 998a254e568SChristoph Hellwig if (map_len < 0) { 999a254e568SChristoph Hellwig ret = map_len; 1000a254e568SChristoph Hellwig break; 1001a254e568SChristoph Hellwig } 1002a254e568SChristoph Hellwig 1003cccbce67SDan Williams map_len = PFN_PHYS(map_len); 1004cccbce67SDan Williams kaddr += offset; 1005a254e568SChristoph Hellwig map_len -= offset; 1006a254e568SChristoph Hellwig if (map_len > end - pos) 1007a254e568SChristoph Hellwig map_len = end - pos; 1008a254e568SChristoph Hellwig 1009a2e050f5SRoss Zwisler /* 1010a2e050f5SRoss Zwisler * The userspace address for the memory copy has already been 
1011a2e050f5SRoss Zwisler * validated via access_ok() in either vfs_read() or 1012a2e050f5SRoss Zwisler * vfs_write(), depending on which operation we are doing. 1013a2e050f5SRoss Zwisler */ 1014a254e568SChristoph Hellwig if (iov_iter_rw(iter) == WRITE) 1015fec53774SDan Williams map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr, 1016fec53774SDan Williams map_len, iter); 1017a254e568SChristoph Hellwig else 1018cccbce67SDan Williams map_len = copy_to_iter(kaddr, map_len, iter); 1019a254e568SChristoph Hellwig if (map_len <= 0) { 1020a254e568SChristoph Hellwig ret = map_len ? map_len : -EFAULT; 1021a254e568SChristoph Hellwig break; 1022a254e568SChristoph Hellwig } 1023a254e568SChristoph Hellwig 1024a254e568SChristoph Hellwig pos += map_len; 1025a254e568SChristoph Hellwig length -= map_len; 1026a254e568SChristoph Hellwig done += map_len; 1027a254e568SChristoph Hellwig } 1028cccbce67SDan Williams dax_read_unlock(id); 1029a254e568SChristoph Hellwig 1030a254e568SChristoph Hellwig return done ? done : ret; 1031a254e568SChristoph Hellwig } 1032a254e568SChristoph Hellwig 1033a254e568SChristoph Hellwig /** 103411c59c92SRoss Zwisler * dax_iomap_rw - Perform I/O to a DAX file 1035a254e568SChristoph Hellwig * @iocb: The control block for this I/O 1036a254e568SChristoph Hellwig * @iter: The addresses to do I/O from or to 1037a254e568SChristoph Hellwig * @ops: iomap ops passed from the file system 1038a254e568SChristoph Hellwig * 1039a254e568SChristoph Hellwig * This function performs read and write operations to directly mapped 1040a254e568SChristoph Hellwig * persistent memory. The callers needs to take care of read/write exclusion 1041a254e568SChristoph Hellwig * and evicting any page cache pages in the region under I/O. 1042a254e568SChristoph Hellwig */ 1043a254e568SChristoph Hellwig ssize_t 104411c59c92SRoss Zwisler dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, 10458ff6daa1SChristoph Hellwig const struct iomap_ops *ops) 1046a254e568SChristoph Hellwig { 1047a254e568SChristoph Hellwig struct address_space *mapping = iocb->ki_filp->f_mapping; 1048a254e568SChristoph Hellwig struct inode *inode = mapping->host; 1049a254e568SChristoph Hellwig loff_t pos = iocb->ki_pos, ret = 0, done = 0; 1050a254e568SChristoph Hellwig unsigned flags = 0; 1051a254e568SChristoph Hellwig 1052168316dbSChristoph Hellwig if (iov_iter_rw(iter) == WRITE) { 1053168316dbSChristoph Hellwig lockdep_assert_held_exclusive(&inode->i_rwsem); 1054a254e568SChristoph Hellwig flags |= IOMAP_WRITE; 1055168316dbSChristoph Hellwig } else { 1056168316dbSChristoph Hellwig lockdep_assert_held(&inode->i_rwsem); 1057168316dbSChristoph Hellwig } 1058a254e568SChristoph Hellwig 1059a254e568SChristoph Hellwig while (iov_iter_count(iter)) { 1060a254e568SChristoph Hellwig ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, 106111c59c92SRoss Zwisler iter, dax_iomap_actor); 1062a254e568SChristoph Hellwig if (ret <= 0) 1063a254e568SChristoph Hellwig break; 1064a254e568SChristoph Hellwig pos += ret; 1065a254e568SChristoph Hellwig done += ret; 1066a254e568SChristoph Hellwig } 1067a254e568SChristoph Hellwig 1068a254e568SChristoph Hellwig iocb->ki_pos += done; 1069a254e568SChristoph Hellwig return done ? 
done : ret; 1070a254e568SChristoph Hellwig } 107111c59c92SRoss Zwisler EXPORT_SYMBOL_GPL(dax_iomap_rw); 1072a7d73fe6SChristoph Hellwig 10739f141d6eSJan Kara static int dax_fault_return(int error) 10749f141d6eSJan Kara { 10759f141d6eSJan Kara if (error == 0) 10769f141d6eSJan Kara return VM_FAULT_NOPAGE; 10779f141d6eSJan Kara if (error == -ENOMEM) 10789f141d6eSJan Kara return VM_FAULT_OOM; 10799f141d6eSJan Kara return VM_FAULT_SIGBUS; 10809f141d6eSJan Kara } 10819f141d6eSJan Kara 1082aaa422c4SDan Williams /* 1083aaa422c4SDan Williams * MAP_SYNC on a dax mapping guarantees dirty metadata is 1084aaa422c4SDan Williams * flushed on write-faults (non-cow), but not read-faults. 1085aaa422c4SDan Williams */ 1086aaa422c4SDan Williams static bool dax_fault_is_synchronous(unsigned long flags, 1087aaa422c4SDan Williams struct vm_area_struct *vma, struct iomap *iomap) 1088aaa422c4SDan Williams { 1089aaa422c4SDan Williams return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) 1090aaa422c4SDan Williams && (iomap->flags & IOMAP_F_DIRTY); 1091aaa422c4SDan Williams } 1092aaa422c4SDan Williams 10939a0dd422SJan Kara static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, 1094a2d58167SDave Jiang const struct iomap_ops *ops) 1095a7d73fe6SChristoph Hellwig { 1096a0987ad5SJan Kara struct vm_area_struct *vma = vmf->vma; 1097a0987ad5SJan Kara struct address_space *mapping = vma->vm_file->f_mapping; 1098a7d73fe6SChristoph Hellwig struct inode *inode = mapping->host; 10991a29d85eSJan Kara unsigned long vaddr = vmf->address; 1100a7d73fe6SChristoph Hellwig loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; 1101a7d73fe6SChristoph Hellwig struct iomap iomap = { 0 }; 11029484ab1bSJan Kara unsigned flags = IOMAP_FAULT; 1103a7d73fe6SChristoph Hellwig int error, major = 0; 1104d2c43ef1SJan Kara bool write = vmf->flags & FAULT_FLAG_WRITE; 1105caa51d26SJan Kara bool sync; 1106b1aa812bSJan Kara int vmf_ret = 0; 1107a7d73fe6SChristoph Hellwig void *entry; 11081b5a1cb2SJan Kara pfn_t pfn; 1109a7d73fe6SChristoph Hellwig 1110a9c42b33SRoss Zwisler trace_dax_pte_fault(inode, vmf, vmf_ret); 1111a7d73fe6SChristoph Hellwig /* 1112a7d73fe6SChristoph Hellwig * Check whether offset isn't beyond end of file now. Caller is supposed 1113a7d73fe6SChristoph Hellwig * to hold locks serializing us with truncate / punch hole so this is 1114a7d73fe6SChristoph Hellwig * a reliable test. 1115a7d73fe6SChristoph Hellwig */ 1116a9c42b33SRoss Zwisler if (pos >= i_size_read(inode)) { 1117a9c42b33SRoss Zwisler vmf_ret = VM_FAULT_SIGBUS; 1118a9c42b33SRoss Zwisler goto out; 1119a9c42b33SRoss Zwisler } 1120a7d73fe6SChristoph Hellwig 1121d2c43ef1SJan Kara if (write && !vmf->cow_page) 1122a7d73fe6SChristoph Hellwig flags |= IOMAP_WRITE; 1123a7d73fe6SChristoph Hellwig 112413e451fdSJan Kara entry = grab_mapping_entry(mapping, vmf->pgoff, 0); 112513e451fdSJan Kara if (IS_ERR(entry)) { 112613e451fdSJan Kara vmf_ret = dax_fault_return(PTR_ERR(entry)); 112713e451fdSJan Kara goto out; 112813e451fdSJan Kara } 112913e451fdSJan Kara 1130a7d73fe6SChristoph Hellwig /* 1131e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1132e2093926SRoss Zwisler * mappings, that we have raced with a PMD fault that overlaps with 1133e2093926SRoss Zwisler * the PTE we need to set up. If so just return and the fault will be 1134e2093926SRoss Zwisler * retried. 
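 *
 * (Editorial note: the pmd_trans_huge()/pmd_devmap() test just below is the
 * detection for that race; returning VM_FAULT_NOPAGE lets the access retry
 * against whatever mapping the concurrent PMD fault installed.)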
1135e2093926SRoss Zwisler */
1136e2093926SRoss Zwisler if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1137e2093926SRoss Zwisler vmf_ret = VM_FAULT_NOPAGE;
1138e2093926SRoss Zwisler goto unlock_entry;
1139e2093926SRoss Zwisler }
1140e2093926SRoss Zwisler
1141e2093926SRoss Zwisler /*
1142a7d73fe6SChristoph Hellwig * Note that we don't bother to use iomap_apply here: DAX requires
1143a7d73fe6SChristoph Hellwig * the file system block size to be equal to the page size, which means
1144a7d73fe6SChristoph Hellwig * that we never have to deal with more than a single extent here.
1145a7d73fe6SChristoph Hellwig */
1146a7d73fe6SChristoph Hellwig error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1147a9c42b33SRoss Zwisler if (error) {
1148a9c42b33SRoss Zwisler vmf_ret = dax_fault_return(error);
114913e451fdSJan Kara goto unlock_entry;
1150a9c42b33SRoss Zwisler }
1151a7d73fe6SChristoph Hellwig if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
115213e451fdSJan Kara error = -EIO; /* fs corruption? */
115313e451fdSJan Kara goto error_finish_iomap;
1154a7d73fe6SChristoph Hellwig }
1155a7d73fe6SChristoph Hellwig
1156a7d73fe6SChristoph Hellwig if (vmf->cow_page) {
115731a6f1a6SJan Kara sector_t sector = dax_iomap_sector(&iomap, pos);
115831a6f1a6SJan Kara
1159a7d73fe6SChristoph Hellwig switch (iomap.type) {
1160a7d73fe6SChristoph Hellwig case IOMAP_HOLE:
1161a7d73fe6SChristoph Hellwig case IOMAP_UNWRITTEN:
1162a7d73fe6SChristoph Hellwig clear_user_highpage(vmf->cow_page, vaddr);
1163a7d73fe6SChristoph Hellwig break;
1164a7d73fe6SChristoph Hellwig case IOMAP_MAPPED:
1165cccbce67SDan Williams error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1166cccbce67SDan Williams sector, PAGE_SIZE, vmf->cow_page, vaddr);
1167a7d73fe6SChristoph Hellwig break;
1168a7d73fe6SChristoph Hellwig default:
1169a7d73fe6SChristoph Hellwig WARN_ON_ONCE(1);
1170a7d73fe6SChristoph Hellwig error = -EIO;
1171a7d73fe6SChristoph Hellwig break;
1172a7d73fe6SChristoph Hellwig }
1173a7d73fe6SChristoph Hellwig
1174a7d73fe6SChristoph Hellwig if (error)
117513e451fdSJan Kara goto error_finish_iomap;
1176b1aa812bSJan Kara
1177b1aa812bSJan Kara __SetPageUptodate(vmf->cow_page);
1178b1aa812bSJan Kara vmf_ret = finish_fault(vmf);
1179b1aa812bSJan Kara if (!vmf_ret)
1180b1aa812bSJan Kara vmf_ret = VM_FAULT_DONE_COW;
118113e451fdSJan Kara goto finish_iomap;
1182a7d73fe6SChristoph Hellwig }
1183a7d73fe6SChristoph Hellwig
1184aaa422c4SDan Williams sync = dax_fault_is_synchronous(flags, vma, &iomap);
1185caa51d26SJan Kara
1186a7d73fe6SChristoph Hellwig switch (iomap.type) {
1187a7d73fe6SChristoph Hellwig case IOMAP_MAPPED:
1188a7d73fe6SChristoph Hellwig if (iomap.flags & IOMAP_F_NEW) {
1189a7d73fe6SChristoph Hellwig count_vm_event(PGMAJFAULT);
1190a0987ad5SJan Kara count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1191a7d73fe6SChristoph Hellwig major = VM_FAULT_MAJOR;
1192a7d73fe6SChristoph Hellwig }
11931b5a1cb2SJan Kara error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
11941b5a1cb2SJan Kara if (error < 0)
11951b5a1cb2SJan Kara goto error_finish_iomap;
11961b5a1cb2SJan Kara
11971b5a1cb2SJan Kara entry = dax_insert_mapping_entry(mapping, vmf, entry,
11981b5a1cb2SJan Kara dax_iomap_sector(&iomap, pos),
1199caa51d26SJan Kara 0, write && !sync);
12001b5a1cb2SJan Kara if (IS_ERR(entry)) {
12011b5a1cb2SJan Kara error = PTR_ERR(entry);
12021b5a1cb2SJan Kara goto error_finish_iomap;
12031b5a1cb2SJan Kara }
12041b5a1cb2SJan Kara
1205caa51d26SJan Kara /*
1206caa51d26SJan Kara * If we are doing synchronous page fault and inode
needs fsync, 1207caa51d26SJan Kara * we can insert PTE into page tables only after that happens. 1208caa51d26SJan Kara * Skip insertion for now and return the pfn so that caller can 1209caa51d26SJan Kara * insert it after fsync is done. 1210caa51d26SJan Kara */ 1211caa51d26SJan Kara if (sync) { 1212caa51d26SJan Kara if (WARN_ON_ONCE(!pfnp)) { 1213caa51d26SJan Kara error = -EIO; 1214caa51d26SJan Kara goto error_finish_iomap; 1215caa51d26SJan Kara } 1216caa51d26SJan Kara *pfnp = pfn; 1217caa51d26SJan Kara vmf_ret = VM_FAULT_NEEDDSYNC | major; 1218caa51d26SJan Kara goto finish_iomap; 1219caa51d26SJan Kara } 12201b5a1cb2SJan Kara trace_dax_insert_mapping(inode, vmf, entry); 12211b5a1cb2SJan Kara if (write) 12221b5a1cb2SJan Kara error = vm_insert_mixed_mkwrite(vma, vaddr, pfn); 12231b5a1cb2SJan Kara else 12241b5a1cb2SJan Kara error = vm_insert_mixed(vma, vaddr, pfn); 12251b5a1cb2SJan Kara 12269f141d6eSJan Kara /* -EBUSY is fine, somebody else faulted on the same PTE */ 12279f141d6eSJan Kara if (error == -EBUSY) 12289f141d6eSJan Kara error = 0; 1229a7d73fe6SChristoph Hellwig break; 1230a7d73fe6SChristoph Hellwig case IOMAP_UNWRITTEN: 1231a7d73fe6SChristoph Hellwig case IOMAP_HOLE: 1232d2c43ef1SJan Kara if (!write) { 123391d25ba8SRoss Zwisler vmf_ret = dax_load_hole(mapping, entry, vmf); 123413e451fdSJan Kara goto finish_iomap; 12351550290bSRoss Zwisler } 1236a7d73fe6SChristoph Hellwig /*FALLTHRU*/ 1237a7d73fe6SChristoph Hellwig default: 1238a7d73fe6SChristoph Hellwig WARN_ON_ONCE(1); 1239a7d73fe6SChristoph Hellwig error = -EIO; 1240a7d73fe6SChristoph Hellwig break; 1241a7d73fe6SChristoph Hellwig } 1242a7d73fe6SChristoph Hellwig 124313e451fdSJan Kara error_finish_iomap: 12449f141d6eSJan Kara vmf_ret = dax_fault_return(error) | major; 12459f141d6eSJan Kara finish_iomap: 12469f141d6eSJan Kara if (ops->iomap_end) { 12479f141d6eSJan Kara int copied = PAGE_SIZE; 12489f141d6eSJan Kara 12499f141d6eSJan Kara if (vmf_ret & VM_FAULT_ERROR) 12509f141d6eSJan Kara copied = 0; 12519f141d6eSJan Kara /* 12529f141d6eSJan Kara * The fault is done by now and there's no way back (other 12539f141d6eSJan Kara * thread may be already happily using PTE we have installed). 12549f141d6eSJan Kara * Just ignore error from ->iomap_end since we cannot do much 12559f141d6eSJan Kara * with it. 12569f141d6eSJan Kara */ 12579f141d6eSJan Kara ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap); 12581550290bSRoss Zwisler } 125913e451fdSJan Kara unlock_entry: 126091d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, vmf->pgoff); 1261a9c42b33SRoss Zwisler out: 1262a9c42b33SRoss Zwisler trace_dax_pte_fault_done(inode, vmf, vmf_ret); 12639f141d6eSJan Kara return vmf_ret; 1264a7d73fe6SChristoph Hellwig } 1265642261acSRoss Zwisler 1266642261acSRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 1267302a5e31SJan Kara /* 1268302a5e31SJan Kara * The 'colour' (ie low bits) within a PMD of a page offset. This comes up 1269302a5e31SJan Kara * more often than one might expect in the below functions. 
1270302a5e31SJan Kara */ 1271302a5e31SJan Kara #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) 1272642261acSRoss Zwisler 1273f4200391SDave Jiang static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap, 127491d25ba8SRoss Zwisler void *entry) 1275642261acSRoss Zwisler { 1276f4200391SDave Jiang struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1277f4200391SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1278653b2ea3SRoss Zwisler struct inode *inode = mapping->host; 1279642261acSRoss Zwisler struct page *zero_page; 1280653b2ea3SRoss Zwisler void *ret = NULL; 1281642261acSRoss Zwisler spinlock_t *ptl; 1282642261acSRoss Zwisler pmd_t pmd_entry; 1283642261acSRoss Zwisler 1284f4200391SDave Jiang zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); 1285642261acSRoss Zwisler 1286642261acSRoss Zwisler if (unlikely(!zero_page)) 1287653b2ea3SRoss Zwisler goto fallback; 1288642261acSRoss Zwisler 128991d25ba8SRoss Zwisler ret = dax_insert_mapping_entry(mapping, vmf, entry, 0, 1290f5b7b748SJan Kara RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false); 1291642261acSRoss Zwisler if (IS_ERR(ret)) 1292653b2ea3SRoss Zwisler goto fallback; 1293642261acSRoss Zwisler 1294f4200391SDave Jiang ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1295f4200391SDave Jiang if (!pmd_none(*(vmf->pmd))) { 1296642261acSRoss Zwisler spin_unlock(ptl); 1297653b2ea3SRoss Zwisler goto fallback; 1298642261acSRoss Zwisler } 1299642261acSRoss Zwisler 1300f4200391SDave Jiang pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); 1301642261acSRoss Zwisler pmd_entry = pmd_mkhuge(pmd_entry); 1302f4200391SDave Jiang set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); 1303642261acSRoss Zwisler spin_unlock(ptl); 1304f4200391SDave Jiang trace_dax_pmd_load_hole(inode, vmf, zero_page, ret); 1305642261acSRoss Zwisler return VM_FAULT_NOPAGE; 1306653b2ea3SRoss Zwisler 1307653b2ea3SRoss Zwisler fallback: 1308f4200391SDave Jiang trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret); 1309642261acSRoss Zwisler return VM_FAULT_FALLBACK; 1310642261acSRoss Zwisler } 1311642261acSRoss Zwisler 13129a0dd422SJan Kara static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 1313a2d58167SDave Jiang const struct iomap_ops *ops) 1314642261acSRoss Zwisler { 1315f4200391SDave Jiang struct vm_area_struct *vma = vmf->vma; 1316642261acSRoss Zwisler struct address_space *mapping = vma->vm_file->f_mapping; 1317d8a849e1SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1318d8a849e1SDave Jiang bool write = vmf->flags & FAULT_FLAG_WRITE; 1319caa51d26SJan Kara bool sync; 13209484ab1bSJan Kara unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT; 1321642261acSRoss Zwisler struct inode *inode = mapping->host; 1322642261acSRoss Zwisler int result = VM_FAULT_FALLBACK; 1323642261acSRoss Zwisler struct iomap iomap = { 0 }; 1324642261acSRoss Zwisler pgoff_t max_pgoff, pgoff; 1325642261acSRoss Zwisler void *entry; 1326642261acSRoss Zwisler loff_t pos; 1327642261acSRoss Zwisler int error; 1328302a5e31SJan Kara pfn_t pfn; 1329642261acSRoss Zwisler 1330282a8e03SRoss Zwisler /* 1331282a8e03SRoss Zwisler * Check whether offset isn't beyond end of file now. Caller is 1332282a8e03SRoss Zwisler * supposed to hold locks serializing us with truncate / punch hole so 1333282a8e03SRoss Zwisler * this is a reliable test. 
1334282a8e03SRoss Zwisler */ 1335282a8e03SRoss Zwisler pgoff = linear_page_index(vma, pmd_addr); 1336282a8e03SRoss Zwisler max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT; 1337282a8e03SRoss Zwisler 1338f4200391SDave Jiang trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); 1339282a8e03SRoss Zwisler 1340fffa281bSRoss Zwisler /* 1341fffa281bSRoss Zwisler * Make sure that the faulting address's PMD offset (color) matches 1342fffa281bSRoss Zwisler * the PMD offset from the start of the file. This is necessary so 1343fffa281bSRoss Zwisler * that a PMD range in the page table overlaps exactly with a PMD 1344fffa281bSRoss Zwisler * range in the radix tree. 1345fffa281bSRoss Zwisler */ 1346fffa281bSRoss Zwisler if ((vmf->pgoff & PG_PMD_COLOUR) != 1347fffa281bSRoss Zwisler ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) 1348fffa281bSRoss Zwisler goto fallback; 1349fffa281bSRoss Zwisler 1350642261acSRoss Zwisler /* Fall back to PTEs if we're going to COW */ 1351642261acSRoss Zwisler if (write && !(vma->vm_flags & VM_SHARED)) 1352642261acSRoss Zwisler goto fallback; 1353642261acSRoss Zwisler 1354642261acSRoss Zwisler /* If the PMD would extend outside the VMA */ 1355642261acSRoss Zwisler if (pmd_addr < vma->vm_start) 1356642261acSRoss Zwisler goto fallback; 1357642261acSRoss Zwisler if ((pmd_addr + PMD_SIZE) > vma->vm_end) 1358642261acSRoss Zwisler goto fallback; 1359642261acSRoss Zwisler 1360282a8e03SRoss Zwisler if (pgoff > max_pgoff) { 1361282a8e03SRoss Zwisler result = VM_FAULT_SIGBUS; 1362282a8e03SRoss Zwisler goto out; 1363282a8e03SRoss Zwisler } 1364642261acSRoss Zwisler 1365642261acSRoss Zwisler /* If the PMD would extend beyond the file size */ 1366642261acSRoss Zwisler if ((pgoff | PG_PMD_COLOUR) > max_pgoff) 1367642261acSRoss Zwisler goto fallback; 1368642261acSRoss Zwisler 1369642261acSRoss Zwisler /* 137091d25ba8SRoss Zwisler * grab_mapping_entry() will make sure we get a 2MiB empty entry, a 137191d25ba8SRoss Zwisler * 2MiB zero page entry or a DAX PMD. If it can't (because a 4k page 137291d25ba8SRoss Zwisler * is already in the tree, for instance), it will return -EEXIST and 137391d25ba8SRoss Zwisler * we just fall back to 4k entries. 13749f141d6eSJan Kara */ 13759f141d6eSJan Kara entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); 13769f141d6eSJan Kara if (IS_ERR(entry)) 1377876f2946SRoss Zwisler goto fallback; 1378876f2946SRoss Zwisler 1379876f2946SRoss Zwisler /* 1380e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1381e2093926SRoss Zwisler * mappings, that we have raced with a PTE fault that overlaps with 1382e2093926SRoss Zwisler * the PMD we need to set up. If so just return and the fault will be 1383e2093926SRoss Zwisler * retried. 1384e2093926SRoss Zwisler */ 1385e2093926SRoss Zwisler if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && 1386e2093926SRoss Zwisler !pmd_devmap(*vmf->pmd)) { 1387e2093926SRoss Zwisler result = 0; 1388e2093926SRoss Zwisler goto unlock_entry; 1389e2093926SRoss Zwisler } 1390e2093926SRoss Zwisler 1391e2093926SRoss Zwisler /* 1392876f2946SRoss Zwisler * Note that we don't use iomap_apply here. We aren't doing I/O, only 1393876f2946SRoss Zwisler * setting up a mapping, so really we're using iomap_begin() as a way 1394876f2946SRoss Zwisler * to look up our filesystem block. 
1395876f2946SRoss Zwisler */ 1396876f2946SRoss Zwisler pos = (loff_t)pgoff << PAGE_SHIFT; 1397876f2946SRoss Zwisler error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap); 1398876f2946SRoss Zwisler if (error) 1399876f2946SRoss Zwisler goto unlock_entry; 1400876f2946SRoss Zwisler 1401876f2946SRoss Zwisler if (iomap.offset + iomap.length < pos + PMD_SIZE) 14029f141d6eSJan Kara goto finish_iomap; 14039f141d6eSJan Kara 1404aaa422c4SDan Williams sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap); 1405caa51d26SJan Kara 1406642261acSRoss Zwisler switch (iomap.type) { 1407642261acSRoss Zwisler case IOMAP_MAPPED: 1408302a5e31SJan Kara error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn); 1409302a5e31SJan Kara if (error < 0) 1410302a5e31SJan Kara goto finish_iomap; 1411302a5e31SJan Kara 1412302a5e31SJan Kara entry = dax_insert_mapping_entry(mapping, vmf, entry, 1413302a5e31SJan Kara dax_iomap_sector(&iomap, pos), 1414caa51d26SJan Kara RADIX_DAX_PMD, write && !sync); 1415302a5e31SJan Kara if (IS_ERR(entry)) 1416302a5e31SJan Kara goto finish_iomap; 1417302a5e31SJan Kara 1418caa51d26SJan Kara /* 1419caa51d26SJan Kara * If we are doing synchronous page fault and inode needs fsync, 1420caa51d26SJan Kara * we can insert PMD into page tables only after that happens. 1421caa51d26SJan Kara * Skip insertion for now and return the pfn so that caller can 1422caa51d26SJan Kara * insert it after fsync is done. 1423caa51d26SJan Kara */ 1424caa51d26SJan Kara if (sync) { 1425caa51d26SJan Kara if (WARN_ON_ONCE(!pfnp)) 1426caa51d26SJan Kara goto finish_iomap; 1427caa51d26SJan Kara *pfnp = pfn; 1428caa51d26SJan Kara result = VM_FAULT_NEEDDSYNC; 1429caa51d26SJan Kara goto finish_iomap; 1430caa51d26SJan Kara } 1431caa51d26SJan Kara 1432302a5e31SJan Kara trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); 1433302a5e31SJan Kara result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn, 1434302a5e31SJan Kara write); 1435642261acSRoss Zwisler break; 1436642261acSRoss Zwisler case IOMAP_UNWRITTEN: 1437642261acSRoss Zwisler case IOMAP_HOLE: 1438642261acSRoss Zwisler if (WARN_ON_ONCE(write)) 1439876f2946SRoss Zwisler break; 144091d25ba8SRoss Zwisler result = dax_pmd_load_hole(vmf, &iomap, entry); 1441642261acSRoss Zwisler break; 1442642261acSRoss Zwisler default: 1443642261acSRoss Zwisler WARN_ON_ONCE(1); 1444642261acSRoss Zwisler break; 1445642261acSRoss Zwisler } 1446642261acSRoss Zwisler 14479f141d6eSJan Kara finish_iomap: 14489f141d6eSJan Kara if (ops->iomap_end) { 14499f141d6eSJan Kara int copied = PMD_SIZE; 14509f141d6eSJan Kara 14519f141d6eSJan Kara if (result == VM_FAULT_FALLBACK) 14529f141d6eSJan Kara copied = 0; 14539f141d6eSJan Kara /* 14549f141d6eSJan Kara * The fault is done by now and there's no way back (other 14559f141d6eSJan Kara * thread may be already happily using PMD we have installed). 14569f141d6eSJan Kara * Just ignore error from ->iomap_end since we cannot do much 14579f141d6eSJan Kara * with it. 
14589f141d6eSJan Kara */
14599f141d6eSJan Kara ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
14609f141d6eSJan Kara &iomap);
14619f141d6eSJan Kara }
1462876f2946SRoss Zwisler unlock_entry:
146391d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, pgoff);
1464642261acSRoss Zwisler fallback:
1465642261acSRoss Zwisler if (result == VM_FAULT_FALLBACK) {
1466d8a849e1SDave Jiang split_huge_pmd(vma, vmf->pmd, vmf->address);
1467642261acSRoss Zwisler count_vm_event(THP_FAULT_FALLBACK);
1468642261acSRoss Zwisler }
1469282a8e03SRoss Zwisler out:
1470f4200391SDave Jiang trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1471642261acSRoss Zwisler return result;
1472642261acSRoss Zwisler }
1473a2d58167SDave Jiang #else
14749a0dd422SJan Kara static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
147501cddfe9SArnd Bergmann const struct iomap_ops *ops)
1476a2d58167SDave Jiang {
1477a2d58167SDave Jiang return VM_FAULT_FALLBACK;
1478a2d58167SDave Jiang }
1479642261acSRoss Zwisler #endif /* CONFIG_FS_DAX_PMD */
1480a2d58167SDave Jiang
1481a2d58167SDave Jiang /**
1482a2d58167SDave Jiang * dax_iomap_fault - handle a page fault on a DAX file
1483a2d58167SDave Jiang * @vmf: The description of the fault
1484cec04e8cSJan Kara * @pe_size: Size of the page to fault in
14859a0dd422SJan Kara * @pfnp: PFN to insert for synchronous faults if fsync is required
1486cec04e8cSJan Kara * @ops: Iomap ops passed from the file system
1487a2d58167SDave Jiang *
1488a2d58167SDave Jiang * When a page fault occurs, filesystems may call this helper in
1489a2d58167SDave Jiang * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1490a2d58167SDave Jiang * has done all the necessary locking for the page fault to proceed
1491a2d58167SDave Jiang * successfully.
1492a2d58167SDave Jiang */
1493c791ace1SDave Jiang int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
14949a0dd422SJan Kara pfn_t *pfnp, const struct iomap_ops *ops)
1495a2d58167SDave Jiang {
1496c791ace1SDave Jiang switch (pe_size) {
1497c791ace1SDave Jiang case PE_SIZE_PTE:
14989a0dd422SJan Kara return dax_iomap_pte_fault(vmf, pfnp, ops);
1499c791ace1SDave Jiang case PE_SIZE_PMD:
15009a0dd422SJan Kara return dax_iomap_pmd_fault(vmf, pfnp, ops);
1501a2d58167SDave Jiang default:
1502a2d58167SDave Jiang return VM_FAULT_FALLBACK;
1503a2d58167SDave Jiang }
1504a2d58167SDave Jiang }
1505a2d58167SDave Jiang EXPORT_SYMBOL_GPL(dax_iomap_fault);
150671eab6dfSJan Kara
150771eab6dfSJan Kara /**
150871eab6dfSJan Kara * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
150971eab6dfSJan Kara * @vmf: The description of the fault
151071eab6dfSJan Kara * @pe_size: Size of entry to be inserted
151171eab6dfSJan Kara * @pfn: PFN to insert
151271eab6dfSJan Kara *
151371eab6dfSJan Kara * This function inserts a writeable PTE or PMD entry into the page tables for
151471eab6dfSJan Kara * an mmapped DAX file. It also takes care of marking the corresponding radix
151571eab6dfSJan Kara * tree entry as dirty.
151671eab6dfSJan Kara */ 151771eab6dfSJan Kara static int dax_insert_pfn_mkwrite(struct vm_fault *vmf, 151871eab6dfSJan Kara enum page_entry_size pe_size, 151971eab6dfSJan Kara pfn_t pfn) 152071eab6dfSJan Kara { 152171eab6dfSJan Kara struct address_space *mapping = vmf->vma->vm_file->f_mapping; 152271eab6dfSJan Kara void *entry, **slot; 152371eab6dfSJan Kara pgoff_t index = vmf->pgoff; 152471eab6dfSJan Kara int vmf_ret, error; 152571eab6dfSJan Kara 152671eab6dfSJan Kara spin_lock_irq(&mapping->tree_lock); 152771eab6dfSJan Kara entry = get_unlocked_mapping_entry(mapping, index, &slot); 152871eab6dfSJan Kara /* Did we race with someone splitting entry or so? */ 152971eab6dfSJan Kara if (!entry || 153071eab6dfSJan Kara (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) || 153171eab6dfSJan Kara (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) { 153271eab6dfSJan Kara put_unlocked_mapping_entry(mapping, index, entry); 153371eab6dfSJan Kara spin_unlock_irq(&mapping->tree_lock); 153471eab6dfSJan Kara trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, 153571eab6dfSJan Kara VM_FAULT_NOPAGE); 153671eab6dfSJan Kara return VM_FAULT_NOPAGE; 153771eab6dfSJan Kara } 153871eab6dfSJan Kara radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY); 153971eab6dfSJan Kara entry = lock_slot(mapping, slot); 154071eab6dfSJan Kara spin_unlock_irq(&mapping->tree_lock); 154171eab6dfSJan Kara switch (pe_size) { 154271eab6dfSJan Kara case PE_SIZE_PTE: 154371eab6dfSJan Kara error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 154471eab6dfSJan Kara vmf_ret = dax_fault_return(error); 154571eab6dfSJan Kara break; 154671eab6dfSJan Kara #ifdef CONFIG_FS_DAX_PMD 154771eab6dfSJan Kara case PE_SIZE_PMD: 154871eab6dfSJan Kara vmf_ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, 154971eab6dfSJan Kara pfn, true); 155071eab6dfSJan Kara break; 155171eab6dfSJan Kara #endif 155271eab6dfSJan Kara default: 155371eab6dfSJan Kara vmf_ret = VM_FAULT_FALLBACK; 155471eab6dfSJan Kara } 155571eab6dfSJan Kara put_locked_mapping_entry(mapping, index); 155671eab6dfSJan Kara trace_dax_insert_pfn_mkwrite(mapping->host, vmf, vmf_ret); 155771eab6dfSJan Kara return vmf_ret; 155871eab6dfSJan Kara } 155971eab6dfSJan Kara 156071eab6dfSJan Kara /** 156171eab6dfSJan Kara * dax_finish_sync_fault - finish synchronous page fault 156271eab6dfSJan Kara * @vmf: The description of the fault 156371eab6dfSJan Kara * @pe_size: Size of entry to be inserted 156471eab6dfSJan Kara * @pfn: PFN to insert 156571eab6dfSJan Kara * 156671eab6dfSJan Kara * This function ensures that the file range touched by the page fault is 156771eab6dfSJan Kara * stored persistently on the media and handles inserting of appropriate page 156871eab6dfSJan Kara * table entry. 
156971eab6dfSJan Kara */ 157071eab6dfSJan Kara int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 157171eab6dfSJan Kara pfn_t pfn) 157271eab6dfSJan Kara { 157371eab6dfSJan Kara int err; 157471eab6dfSJan Kara loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; 157571eab6dfSJan Kara size_t len = 0; 157671eab6dfSJan Kara 157771eab6dfSJan Kara if (pe_size == PE_SIZE_PTE) 157871eab6dfSJan Kara len = PAGE_SIZE; 157971eab6dfSJan Kara else if (pe_size == PE_SIZE_PMD) 158071eab6dfSJan Kara len = PMD_SIZE; 158171eab6dfSJan Kara else 158271eab6dfSJan Kara WARN_ON_ONCE(1); 158371eab6dfSJan Kara err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); 158471eab6dfSJan Kara if (err) 158571eab6dfSJan Kara return VM_FAULT_SIGBUS; 158671eab6dfSJan Kara return dax_insert_pfn_mkwrite(vmf, pe_size, pfn); 158771eab6dfSJan Kara } 158871eab6dfSJan Kara EXPORT_SYMBOL_GPL(dax_finish_sync_fault); 1589
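
/*
 * Editor's illustration (not part of fs/dax.c): a minimal sketch of how a
 * filesystem might drive dax_iomap_fault() and dax_finish_sync_fault() from
 * the ->fault/->huge_fault methods of its vm_operations_struct, loosely
 * modelled on the ext4/xfs DAX fault paths of this kernel generation.  The
 * names myfs_iomap_ops and myfs_dax_sem are hypothetical stand-ins for the
 * filesystem's iomap ops and its fault vs. truncate exclusion lock.
 */
#if 0	/* illustrative sketch only */
extern const struct iomap_ops myfs_iomap_ops;		/* hypothetical */
static DECLARE_RWSEM(myfs_dax_sem);			/* hypothetical */

static int myfs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	pfn_t pfn;
	int result;

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}
	/* Serialize against truncate / hole punch, as dax_iomap_fault() expects. */
	down_read(&myfs_dax_sem);

	result = dax_iomap_fault(vmf, pe_size, &pfn, &myfs_iomap_ops);

	if (write) {
		/*
		 * For a MAP_SYNC write fault the PFN was returned instead of
		 * being mapped; flush metadata and insert the entry now.
		 */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		up_read(&myfs_dax_sem);
		sb_end_pagefault(inode->i_sb);
	} else {
		up_read(&myfs_dax_sem);
	}

	return result;
}
#endif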
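
/*
 * Editor's illustration (not part of fs/dax.c): a minimal sketch of read and
 * write iterators calling dax_iomap_rw(), complementing the fault-handler
 * sketch above.  myfs_iomap_ops is again a hypothetical iomap_ops instance;
 * the locking follows the lockdep assertions in dax_iomap_rw() (i_rwsem held
 * shared for reads, exclusive for writes).  A real filesystem would also
 * handle file_remove_privs(), timestamp updates and similar details that are
 * omitted here.
 */
#if 0	/* illustrative sketch only */
extern const struct iomap_ops myfs_iomap_ops;		/* hypothetical */

static ssize_t myfs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;	/* nothing to do, skip the atime update */

	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &myfs_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

static ssize_t myfs_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = dax_iomap_rw(iocb, from, &myfs_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif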