/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}
static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
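/*
 * Illustrative example of the PMD alignment done in dax_entry_waitqueue()
 * above (values assume a 4k page size and 2MiB PMDs, i.e.
 * PMD_SHIFT - PAGE_SHIFT == 9; this block is a reading aid, not code used
 * by this file):
 *
 *	entry = a RADIX_DAX_PMD entry covering indices 0x200..0x3ff
 *	dax_entry_waitqueue(mapping, 0x200, entry, &key1);
 *	dax_entry_waitqueue(mapping, 0x3ff, entry, &key2);
 *
 * Both calls mask the index down to 0x200, so they hash to the same
 * waitqueue and key1.entry_start == key2.entry_start.  A PTE waiter and a
 * PMD waiter for the same 2MiB range therefore always meet on one queue.
 */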
/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}
/*
 * Look up an entry in the radix tree and wait for it to become unlocked if
 * it is an exceptional entry, then return it.  The caller must call
 * put_unlocked_mapping_entry() when it decides not to lock the entry, or
 * put_locked_mapping_entry() when it has locked the entry and now wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					    &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with the radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up the next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
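/*
 * Sketch of the lookup/lock protocol implemented by the helpers above, as
 * used by callers later in this file (__dax_invalidate_mapping_entry(),
 * dax_writeback_one()).  The "keep()" test stands in for whatever check
 * the caller performs on the entry:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	if (!entry || !keep(entry)) {
 *		put_unlocked_mapping_entry(mapping, index, entry);
 *		spin_unlock_irq(&mapping->tree_lock);
 *		return;
 *	}
 *	entry = lock_slot(mapping, slot);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	... lengthy work, possibly sleeping; entry cannot go away ...
 *	put_locked_mapping_entry(mapping, index);
 */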
/*
 * Find the radix tree entry at the given index.  If it points to an
 * exceptional entry, return it with the radix tree entry locked.  If the
 * radix tree doesn't contain the given index, create an empty exceptional
 * entry for the index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries.  There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags.  For
 * persistent memory the benefit is doubtful.  We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting a 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
							   entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure the radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted an empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from the truncate / punch_hole path.  As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing).
	 * Since the caller has seen an exceptional entry for this index, we
	 * better find it at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}
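/*
 * For orientation, a sketch of what grab_mapping_entry() stores for a
 * freshly created empty PMD entry (the RADIX_DAX_* bit definitions and the
 * dax_radix_*() helpers live in include/linux/dax.h; the exact bit layout
 * is defined there, not here):
 *
 *	entry = dax_radix_locked_entry(0, RADIX_DAX_PMD | RADIX_DAX_EMPTY);
 *	dax_is_pmd_entry(entry);	// true
 *	dax_is_empty_entry(entry);	// true
 *	dax_is_zero_entry(entry);	// false
 *	dax_radix_order(entry);		// PMD_SHIFT - PAGE_SHIFT
 *
 * The lock bit travels inside the entry value itself, which is why
 * lock_slot()/unlock_slot() are radix tree slot replacements.
 */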
/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}
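/*
 * copy_user_dax() above shows the access idiom this file relies on
 * throughout: translate a block-device sector into a page offset within
 * the DAX device, then resolve that to a kernel address under
 * dax_read_lock().  Stripped-down sketch of the idiom by itself:
 *
 *	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
 *	if (rc)
 *		return rc;
 *	id = dax_read_lock();
 *	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
 *	if (rc >= 0) {
 *		// kaddr maps PFN_PHYS(rc) bytes, valid while the lock is held
 *	}
 *	dax_read_unlock(id);
 */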
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_range(mapping,
					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
					PMD_SIZE, 0);
		else /* pte entry */
			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
					PAGE_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
		entry = new_entry;
	}

	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);

	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}
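/*
 * Worked example for pgoff_address(), with illustrative numbers and a 4k
 * page size: for vma->vm_start == 0x7f0000000000, vma->vm_pgoff == 16 and
 * pgoff == 20, the mapped address is
 * 0x7f0000000000 + ((20 - 16) << 12) == 0x7f0000004000.
 */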
/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end,
				   &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}
static int dax_writeback_one(struct block_device *bdev,
		struct dax_device *dax_dev, struct address_space *mapping,
		pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot, *kaddr;
	long ret = 0, id;
	sector_t sector;
	pgoff_t pgoff;
	size_t size;
	pfn_t pfn;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback.  We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches.  This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	sector = dax_radix_sector(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	id = dax_read_lock();
	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (ret)
		goto dax_unlock;

	/*
	 * dax_direct_access() may sleep, so cannot hold tree_lock over
	 * its invocation.
	 */
	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
	if (ret < 0)
		goto dax_unlock;

	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
		ret = -EIO;
		goto dax_unlock;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
	dax_flush(dax_dev, pgoff, kaddr, size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag.  There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
 dax_unlock:
	dax_read_unlock(id);
	put_locked_mapping_entry(mapping, index);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
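/*
 * Summary of the ordering dax_writeback_one() just implemented (a reading
 * aid, not additional behaviour):
 *
 *	1. under tree_lock: revalidate the entry, check TOWRITE, lock the
 *	   entry, clear TOWRITE, drop tree_lock
 *	2. write-protect all userspace mappings of the pfn (mkclean)
 *	3. dax_flush() the kernel alias out to the persistence domain
 *	4. under tree_lock: clear the DIRTY tag
 *
 * A write fault racing with steps 2-4 blocks on the locked entry, so it
 * can only re-dirty the radix tree entry after the flush has finished.
 */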
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end].  This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, dax_dev, mapping,
					indices[i], pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
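/*
 * Sketch of a typical caller, modeled on the ext4 ->writepages path of
 * this era (hypothetical "myfs" names; the surrounding filesystem logic
 * is elided):
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *				   struct writeback_control *wbc)
 *	{
 *		struct inode *inode = mapping->host;
 *
 *		if (dax_mapping(mapping))
 *			return dax_writeback_mapping_range(mapping,
 *					inode->i_sb->s_bdev, wbc);
 *		...
 *	}
 */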
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, void *entry,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	void *ret, *kaddr;
	pgoff_t pgoff;
	int id, rc;
	pfn_t pfn;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	trace_dax_insert_mapping(mapping->host, vmf, ret);
	if (vmf->flags & FAULT_FLAG_WRITE)
		return vm_insert_mixed_mkwrite(vma, vaddr, pfn);
	else
		return vm_insert_mixed(vma, vaddr, pfn);
}
/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	int ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	void *entry2;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
					  RADIX_DAX_ZERO_PAGE);
	if (IS_ERR(entry2)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}
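/*
 * Worked example for the helper above, assuming a 512-byte logical block
 * size: offset == 1024, length == 2048 is aligned, so the caller below can
 * push the zeroing down to the block layer with blkdev_issue_zeroout();
 * offset == 100, length == 200 is not, and the partial block has to be
 * zeroed through the DAX mapping with memset() plus dax_flush() instead.
 */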
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, pgoff, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}
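/*
 * Worked example for dax_iomap_sector(), with illustrative numbers and a
 * 4k page size (iomap->blkno is in 512-byte units): for iomap->blkno ==
 * 1000, iomap->offset == 65536 and pos == 0x13256, pos & PAGE_MASK ==
 * 0x13000 == 77824, so the result is 1000 + ((77824 - 65536) >> 9) == 1024.
 */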
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables.  We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}
/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
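/*
 * Sketch of a typical ->read_iter caller, modeled on the ext4/XFS DAX read
 * paths of this era (hypothetical "myfs" names; myfs_iomap_ops stands for
 * the filesystem's struct iomap_ops):
 *
 *	static ssize_t myfs_dax_read_iter(struct kiocb *iocb,
 *					  struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &myfs_iomap_ops);
 *		inode_unlock_shared(inode);
 *
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 */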
static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

static int dax_iomap_pte_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	trace_dax_pte_fault(inode, vmf, vmf_ret);
	/*
	 * Check whether offset isn't beyond end of file now.  Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		vmf_ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		vmf_ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the filesystem block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error) {
		vmf_ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page,
					vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
				sector, PAGE_SIZE, entry, vmf->vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	vmf_ret = dax_fault_return(error) | major;
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
117113e451fdSJan Kara  unlock_entry:
117291d25ba8SRoss Zwisler 	put_locked_mapping_entry(mapping, vmf->pgoff);
1173a9c42b33SRoss Zwisler  out:
1174a9c42b33SRoss Zwisler 	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
11759f141d6eSJan Kara 	return vmf_ret;
1176a7d73fe6SChristoph Hellwig }
1177642261acSRoss Zwisler 
1178642261acSRoss Zwisler #ifdef CONFIG_FS_DAX_PMD
1179642261acSRoss Zwisler /*
1180642261acSRoss Zwisler  * The 'colour' (i.e. low bits) within a PMD of a page offset.  This comes up
1181642261acSRoss Zwisler  * more often than one might expect in the functions below.
1182642261acSRoss Zwisler  */
1183642261acSRoss Zwisler #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
1184642261acSRoss Zwisler 
1185f4200391SDave Jiang static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
118691d25ba8SRoss Zwisler 		loff_t pos, void *entry)
1187642261acSRoss Zwisler {
1188f4200391SDave Jiang 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1189cccbce67SDan Williams 	const sector_t sector = dax_iomap_sector(iomap, pos);
1190cccbce67SDan Williams 	struct dax_device *dax_dev = iomap->dax_dev;
1191642261acSRoss Zwisler 	struct block_device *bdev = iomap->bdev;
119227a7ffacSRoss Zwisler 	struct inode *inode = mapping->host;
1193cccbce67SDan Williams 	const size_t size = PMD_SIZE;
1194cccbce67SDan Williams 	void *ret = NULL, *kaddr;
1195cccbce67SDan Williams 	long length = 0;
1196cccbce67SDan Williams 	pgoff_t pgoff;
1197cccbce67SDan Williams 	pfn_t pfn;
1198cccbce67SDan Williams 	int id;
1199642261acSRoss Zwisler 
1200cccbce67SDan Williams 	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
120127a7ffacSRoss Zwisler 		goto fallback;
1202642261acSRoss Zwisler 
1203cccbce67SDan Williams 	id = dax_read_lock();
1204cccbce67SDan Williams 	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
1205cccbce67SDan Williams 	if (length < 0)
1206cccbce67SDan Williams 		goto unlock_fallback;
1207cccbce67SDan Williams 	length = PFN_PHYS(length);
1208642261acSRoss Zwisler 
1209cccbce67SDan Williams 	if (length < size)
1210cccbce67SDan Williams 		goto unlock_fallback;
1211cccbce67SDan Williams 	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
1212cccbce67SDan Williams 		goto unlock_fallback;
1213cccbce67SDan Williams 	if (!pfn_t_devmap(pfn))
1214cccbce67SDan Williams 		goto unlock_fallback;
1215cccbce67SDan Williams 	dax_read_unlock(id);
1216cccbce67SDan Williams 
121791d25ba8SRoss Zwisler 	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector,
1218642261acSRoss Zwisler 			RADIX_DAX_PMD);
1219642261acSRoss Zwisler 	if (IS_ERR(ret))
122027a7ffacSRoss Zwisler 		goto fallback;
1221642261acSRoss Zwisler 
1222cccbce67SDan Williams 	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
1223f4200391SDave Jiang 	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1224cccbce67SDan Williams 			pfn, vmf->flags & FAULT_FLAG_WRITE);
1225642261acSRoss Zwisler 
1226cccbce67SDan Williams  unlock_fallback:
1227cccbce67SDan Williams 	dax_read_unlock(id);
122827a7ffacSRoss Zwisler  fallback:
1229cccbce67SDan Williams 	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
1230642261acSRoss Zwisler 	return VM_FAULT_FALLBACK;
1231642261acSRoss Zwisler }
1232642261acSRoss Zwisler 
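/*
 * Editorial note, a worked example of the alignment checks above, assuming
 * x86-64 with 4KiB pages and 2MiB PMDs: PG_PMD_COLOUR is
 * (2MiB >> 12) - 1 = 511 (0x1ff).  dax_direct_access() must therefore
 * return at least 2MiB of contiguous space and a pfn whose low nine bits
 * are clear; otherwise the range cannot be mapped by a single PMD and we
 * fall back to PTEs.
 */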
1233f4200391SDave Jiang static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
123491d25ba8SRoss Zwisler 		void *entry)
1235642261acSRoss Zwisler {
1236f4200391SDave Jiang 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1237f4200391SDave Jiang 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1238653b2ea3SRoss Zwisler 	struct inode *inode = mapping->host;
1239642261acSRoss Zwisler 	struct page *zero_page;
1240653b2ea3SRoss Zwisler 	void *ret = NULL;
1241642261acSRoss Zwisler 	spinlock_t *ptl;
1242642261acSRoss Zwisler 	pmd_t pmd_entry;
1243642261acSRoss Zwisler 
1244f4200391SDave Jiang 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1245642261acSRoss Zwisler 
1246642261acSRoss Zwisler 	if (unlikely(!zero_page))
1247653b2ea3SRoss Zwisler 		goto fallback;
1248642261acSRoss Zwisler 
124991d25ba8SRoss Zwisler 	ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
125091d25ba8SRoss Zwisler 			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE);
1251642261acSRoss Zwisler 	if (IS_ERR(ret))
1252653b2ea3SRoss Zwisler 		goto fallback;
1253642261acSRoss Zwisler 
1254f4200391SDave Jiang 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1255f4200391SDave Jiang 	if (!pmd_none(*(vmf->pmd))) {
1256642261acSRoss Zwisler 		spin_unlock(ptl);
1257653b2ea3SRoss Zwisler 		goto fallback;
1258642261acSRoss Zwisler 	}
1259642261acSRoss Zwisler 
1260f4200391SDave Jiang 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1261642261acSRoss Zwisler 	pmd_entry = pmd_mkhuge(pmd_entry);
1262f4200391SDave Jiang 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1263642261acSRoss Zwisler 	spin_unlock(ptl);
1264f4200391SDave Jiang 	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1265642261acSRoss Zwisler 	return VM_FAULT_NOPAGE;
1266653b2ea3SRoss Zwisler 
1267653b2ea3SRoss Zwisler  fallback:
1268f4200391SDave Jiang 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1269642261acSRoss Zwisler 	return VM_FAULT_FALLBACK;
1270642261acSRoss Zwisler }
1271642261acSRoss Zwisler 
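/*
 * Editorial note: dax_iomap_pmd_fault() below is deliberately conservative.
 * It falls back to PTEs whenever the faulting address and the file offset
 * have different PMD colour, when a write would COW a private mapping, when
 * the PMD would stick out of the VMA or past EOF, or when a 4k entry already
 * occupies the relevant radix tree slot.  VM_FAULT_FALLBACK just tells the
 * core MM to retry the fault with PTEs.
 */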
1272a2d58167SDave Jiang static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1273a2d58167SDave Jiang 		const struct iomap_ops *ops)
1274642261acSRoss Zwisler {
1275f4200391SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
1276642261acSRoss Zwisler 	struct address_space *mapping = vma->vm_file->f_mapping;
1277d8a849e1SDave Jiang 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1278d8a849e1SDave Jiang 	bool write = vmf->flags & FAULT_FLAG_WRITE;
12799484ab1bSJan Kara 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1280642261acSRoss Zwisler 	struct inode *inode = mapping->host;
1281642261acSRoss Zwisler 	int result = VM_FAULT_FALLBACK;
1282642261acSRoss Zwisler 	struct iomap iomap = { 0 };
1283642261acSRoss Zwisler 	pgoff_t max_pgoff, pgoff;
1284642261acSRoss Zwisler 	void *entry;
1285642261acSRoss Zwisler 	loff_t pos;
1286642261acSRoss Zwisler 	int error;
1287642261acSRoss Zwisler 
1288282a8e03SRoss Zwisler 	/*
1289282a8e03SRoss Zwisler 	 * Check whether the offset isn't beyond the end of the file now.  The
1290282a8e03SRoss Zwisler 	 * caller is supposed to hold locks serializing us with truncate /
1291282a8e03SRoss Zwisler 	 * punch hole, so this is a reliable test.
1292282a8e03SRoss Zwisler 	 */
1293282a8e03SRoss Zwisler 	pgoff = linear_page_index(vma, pmd_addr);
1294282a8e03SRoss Zwisler 	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
1295282a8e03SRoss Zwisler 
1296f4200391SDave Jiang 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1297282a8e03SRoss Zwisler 
1298fffa281bSRoss Zwisler 	/*
1299fffa281bSRoss Zwisler 	 * Make sure that the faulting address's PMD offset (colour) matches
1300fffa281bSRoss Zwisler 	 * the PMD offset from the start of the file.  This is necessary so
1301fffa281bSRoss Zwisler 	 * that a PMD range in the page table overlaps exactly with a PMD
1302fffa281bSRoss Zwisler 	 * range in the radix tree.
1303fffa281bSRoss Zwisler 	 */
1304fffa281bSRoss Zwisler 	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1305fffa281bSRoss Zwisler 	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1306fffa281bSRoss Zwisler 		goto fallback;
1307fffa281bSRoss Zwisler 
1308642261acSRoss Zwisler 	/* Fall back to PTEs if we're going to COW */
1309642261acSRoss Zwisler 	if (write && !(vma->vm_flags & VM_SHARED))
1310642261acSRoss Zwisler 		goto fallback;
1311642261acSRoss Zwisler 
1312642261acSRoss Zwisler 	/* If the PMD would extend outside the VMA */
1313642261acSRoss Zwisler 	if (pmd_addr < vma->vm_start)
1314642261acSRoss Zwisler 		goto fallback;
1315642261acSRoss Zwisler 	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1316642261acSRoss Zwisler 		goto fallback;
1317642261acSRoss Zwisler 
1318282a8e03SRoss Zwisler 	if (pgoff > max_pgoff) {
1319282a8e03SRoss Zwisler 		result = VM_FAULT_SIGBUS;
1320282a8e03SRoss Zwisler 		goto out;
1321282a8e03SRoss Zwisler 	}
1322642261acSRoss Zwisler 
1323642261acSRoss Zwisler 	/* If the PMD would extend beyond the file size */
1324642261acSRoss Zwisler 	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
1325642261acSRoss Zwisler 		goto fallback;
1326642261acSRoss Zwisler 
1327642261acSRoss Zwisler 	/*
132891d25ba8SRoss Zwisler 	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
132991d25ba8SRoss Zwisler 	 * 2MiB zero page entry or a DAX PMD entry.  If it can't (because a
133091d25ba8SRoss Zwisler 	 * 4k page is already in the tree, for instance), it will return
133191d25ba8SRoss Zwisler 	 * -EEXIST and we just fall back to 4k entries.
13329f141d6eSJan Kara 	 */
13339f141d6eSJan Kara 	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
13349f141d6eSJan Kara 	if (IS_ERR(entry))
1335876f2946SRoss Zwisler 		goto fallback;
1336876f2946SRoss Zwisler 
1337876f2946SRoss Zwisler 	/*
1338e2093926SRoss Zwisler 	 * It is possible, particularly with mixed reads & writes to private
1339e2093926SRoss Zwisler 	 * mappings, that we have raced with a PTE fault that overlaps with
1340e2093926SRoss Zwisler 	 * the PMD we need to set up.  If so, just return and the fault will
1341e2093926SRoss Zwisler 	 * be retried.
1342e2093926SRoss Zwisler 	 */
1343e2093926SRoss Zwisler 	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1344e2093926SRoss Zwisler 	    !pmd_devmap(*vmf->pmd)) {
1345e2093926SRoss Zwisler 		result = 0;
1346e2093926SRoss Zwisler 		goto unlock_entry;
1347e2093926SRoss Zwisler 	}
1348e2093926SRoss Zwisler 
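	/*
	 * Editorial note: because of the colour check earlier, the pgoff
	 * computed from pmd_addr is PMD-aligned, so the pos computed below is
	 * as well; asking the filesystem for PMD_SIZE bytes at that offset
	 * can describe the whole huge page with a single extent.
	 */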
1349e2093926SRoss Zwisler 	/*
1350876f2946SRoss Zwisler 	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1351876f2946SRoss Zwisler 	 * setting up a mapping, so really we're using iomap_begin() as a way
1352876f2946SRoss Zwisler 	 * to look up our filesystem block.
1353876f2946SRoss Zwisler 	 */
1354876f2946SRoss Zwisler 	pos = (loff_t)pgoff << PAGE_SHIFT;
1355876f2946SRoss Zwisler 	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1356876f2946SRoss Zwisler 	if (error)
1357876f2946SRoss Zwisler 		goto unlock_entry;
1358876f2946SRoss Zwisler 
1359876f2946SRoss Zwisler 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
13609f141d6eSJan Kara 		goto finish_iomap;
13619f141d6eSJan Kara 
1362642261acSRoss Zwisler 	switch (iomap.type) {
1363642261acSRoss Zwisler 	case IOMAP_MAPPED:
136491d25ba8SRoss Zwisler 		result = dax_pmd_insert_mapping(vmf, &iomap, pos, entry);
1365642261acSRoss Zwisler 		break;
1366642261acSRoss Zwisler 	case IOMAP_UNWRITTEN:
1367642261acSRoss Zwisler 	case IOMAP_HOLE:
1368642261acSRoss Zwisler 		if (WARN_ON_ONCE(write))
1369876f2946SRoss Zwisler 			break;
137091d25ba8SRoss Zwisler 		result = dax_pmd_load_hole(vmf, &iomap, entry);
1371642261acSRoss Zwisler 		break;
1372642261acSRoss Zwisler 	default:
1373642261acSRoss Zwisler 		WARN_ON_ONCE(1);
1374642261acSRoss Zwisler 		break;
1375642261acSRoss Zwisler 	}
1376642261acSRoss Zwisler 
13779f141d6eSJan Kara  finish_iomap:
13789f141d6eSJan Kara 	if (ops->iomap_end) {
13799f141d6eSJan Kara 		int copied = PMD_SIZE;
13809f141d6eSJan Kara 
13819f141d6eSJan Kara 		if (result == VM_FAULT_FALLBACK)
13829f141d6eSJan Kara 			copied = 0;
13839f141d6eSJan Kara 		/*
13849f141d6eSJan Kara 		 * The fault is done by now and there's no way back (another
13859f141d6eSJan Kara 		 * thread may already be happily using the PMD we have installed).
13869f141d6eSJan Kara 		 * Just ignore error from ->iomap_end since we cannot do much
13879f141d6eSJan Kara 		 * with it.
13889f141d6eSJan Kara 		 */
13899f141d6eSJan Kara 		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
13909f141d6eSJan Kara 				&iomap);
13919f141d6eSJan Kara 	}
1392876f2946SRoss Zwisler  unlock_entry:
139391d25ba8SRoss Zwisler 	put_locked_mapping_entry(mapping, pgoff);
1394642261acSRoss Zwisler  fallback:
1395642261acSRoss Zwisler 	if (result == VM_FAULT_FALLBACK) {
1396d8a849e1SDave Jiang 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1397642261acSRoss Zwisler 		count_vm_event(THP_FAULT_FALLBACK);
1398642261acSRoss Zwisler 	}
1399282a8e03SRoss Zwisler  out:
1400f4200391SDave Jiang 	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1401642261acSRoss Zwisler 	return result;
1402642261acSRoss Zwisler }
1403a2d58167SDave Jiang #else
140401cddfe9SArnd Bergmann static int dax_iomap_pmd_fault(struct vm_fault *vmf,
140501cddfe9SArnd Bergmann 		const struct iomap_ops *ops)
1406a2d58167SDave Jiang {
1407a2d58167SDave Jiang 	return VM_FAULT_FALLBACK;
1408a2d58167SDave Jiang }
1409642261acSRoss Zwisler #endif /* CONFIG_FS_DAX_PMD */
1410a2d58167SDave Jiang 
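/*
 * Editorial note, for illustration only: filesystems normally reach
 * dax_iomap_fault() below from their vm_operations_struct.  A minimal
 * sketch, assuming a hypothetical "myfs" that provides its own
 * struct iomap_ops (real callers such as ext4 and xfs also take
 * filesystem locks around the call, as the comment below requires):
 *
 *	static int myfs_fault(struct vm_fault *vmf)
 *	{
 *		return dax_iomap_fault(vmf, PE_SIZE_PTE, &myfs_iomap_ops);
 *	}
 *
 *	static int myfs_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		return dax_iomap_fault(vmf, pe_size, &myfs_iomap_ops);
 *	}
 *
 *	static const struct vm_operations_struct myfs_vm_ops = {
 *		.fault		= myfs_fault,
 *		.huge_fault	= myfs_huge_fault,
 *	};
 */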
1411a2d58167SDave Jiang /**
1412a2d58167SDave Jiang  * dax_iomap_fault - handle a page fault on a DAX file
1413a2d58167SDave Jiang  * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
1414a2d58167SDave Jiang  * @ops: iomap ops passed from the file system
1415a2d58167SDave Jiang  *
1416a2d58167SDave Jiang  * When a page fault occurs, filesystems may call this helper in
1417a2d58167SDave Jiang  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1418a2d58167SDave Jiang  * has done all the necessary locking for the page fault to proceed
1419a2d58167SDave Jiang  * successfully.
1420a2d58167SDave Jiang  */
1421c791ace1SDave Jiang int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1422c791ace1SDave Jiang 		    const struct iomap_ops *ops)
1423a2d58167SDave Jiang {
1424c791ace1SDave Jiang 	switch (pe_size) {
1425c791ace1SDave Jiang 	case PE_SIZE_PTE:
1426a2d58167SDave Jiang 		return dax_iomap_pte_fault(vmf, ops);
1427c791ace1SDave Jiang 	case PE_SIZE_PMD:
1428a2d58167SDave Jiang 		return dax_iomap_pmd_fault(vmf, ops);
1429a2d58167SDave Jiang 	default:
1430a2d58167SDave Jiang 		return VM_FAULT_FALLBACK;
1431a2d58167SDave Jiang 	}
1432a2d58167SDave Jiang }
1433a2d58167SDave Jiang EXPORT_SYMBOL_GPL(dax_iomap_fault);
1434