1d475c634SMatthew Wilcox /* 2d475c634SMatthew Wilcox * fs/dax.c - Direct Access filesystem code 3d475c634SMatthew Wilcox * Copyright (c) 2013-2014 Intel Corporation 4d475c634SMatthew Wilcox * Author: Matthew Wilcox <matthew.r.wilcox@intel.com> 5d475c634SMatthew Wilcox * Author: Ross Zwisler <ross.zwisler@linux.intel.com> 6d475c634SMatthew Wilcox * 7d475c634SMatthew Wilcox * This program is free software; you can redistribute it and/or modify it 8d475c634SMatthew Wilcox * under the terms and conditions of the GNU General Public License, 9d475c634SMatthew Wilcox * version 2, as published by the Free Software Foundation. 10d475c634SMatthew Wilcox * 11d475c634SMatthew Wilcox * This program is distributed in the hope it will be useful, but WITHOUT 12d475c634SMatthew Wilcox * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13d475c634SMatthew Wilcox * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14d475c634SMatthew Wilcox * more details. 15d475c634SMatthew Wilcox */ 16d475c634SMatthew Wilcox 17d475c634SMatthew Wilcox #include <linux/atomic.h> 18d475c634SMatthew Wilcox #include <linux/blkdev.h> 19d475c634SMatthew Wilcox #include <linux/buffer_head.h> 20d77e92e2SRoss Zwisler #include <linux/dax.h> 21d475c634SMatthew Wilcox #include <linux/fs.h> 22d475c634SMatthew Wilcox #include <linux/genhd.h> 23f7ca90b1SMatthew Wilcox #include <linux/highmem.h> 24f7ca90b1SMatthew Wilcox #include <linux/memcontrol.h> 25f7ca90b1SMatthew Wilcox #include <linux/mm.h> 26d475c634SMatthew Wilcox #include <linux/mutex.h> 279973c98eSRoss Zwisler #include <linux/pagevec.h> 28289c6aedSMatthew Wilcox #include <linux/sched.h> 29f361bf4aSIngo Molnar #include <linux/sched/signal.h> 30d475c634SMatthew Wilcox #include <linux/uio.h> 31f7ca90b1SMatthew Wilcox #include <linux/vmstat.h> 3234c0fd54SDan Williams #include <linux/pfn_t.h> 330e749e54SDan Williams #include <linux/sizes.h> 344b4bb46dSJan Kara #include <linux/mmu_notifier.h> 35a254e568SChristoph Hellwig #include <linux/iomap.h> 36a254e568SChristoph Hellwig #include "internal.h" 37d475c634SMatthew Wilcox 38282a8e03SRoss Zwisler #define CREATE_TRACE_POINTS 39282a8e03SRoss Zwisler #include <trace/events/fs_dax.h> 40282a8e03SRoss Zwisler 41ac401cc7SJan Kara /* We choose 4096 entries - same as per-zone page wait tables */ 42ac401cc7SJan Kara #define DAX_WAIT_TABLE_BITS 12 43ac401cc7SJan Kara #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS) 44ac401cc7SJan Kara 45917f3452SRoss Zwisler /* The 'colour' (ie low bits) within a PMD of a page offset. */ 46917f3452SRoss Zwisler #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) 47977fbdcdSMatthew Wilcox #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT) 48917f3452SRoss Zwisler 49ce95ab0fSRoss Zwisler static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES]; 50ac401cc7SJan Kara 51ac401cc7SJan Kara static int __init init_dax_wait_table(void) 52ac401cc7SJan Kara { 53ac401cc7SJan Kara int i; 54ac401cc7SJan Kara 55ac401cc7SJan Kara for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++) 56ac401cc7SJan Kara init_waitqueue_head(wait_table + i); 57ac401cc7SJan Kara return 0; 58ac401cc7SJan Kara } 59ac401cc7SJan Kara fs_initcall(init_dax_wait_table); 60ac401cc7SJan Kara 61527b19d0SRoss Zwisler /* 62527b19d0SRoss Zwisler * We use lowest available bit in exceptional entry for locking, one bit for 63527b19d0SRoss Zwisler * the entry size (PMD) and two more to tell us if the entry is a zero page or 64527b19d0SRoss Zwisler * an empty entry that is just used for locking. In total four special bits. 
65527b19d0SRoss Zwisler * 66527b19d0SRoss Zwisler * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE 67527b19d0SRoss Zwisler * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem 68527b19d0SRoss Zwisler * block allocation. 69527b19d0SRoss Zwisler */ 70527b19d0SRoss Zwisler #define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4) 71527b19d0SRoss Zwisler #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) 72527b19d0SRoss Zwisler #define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1)) 73527b19d0SRoss Zwisler #define RADIX_DAX_ZERO_PAGE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2)) 74527b19d0SRoss Zwisler #define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)) 75527b19d0SRoss Zwisler 763fe0791cSDan Williams static unsigned long dax_radix_pfn(void *entry) 77527b19d0SRoss Zwisler { 78527b19d0SRoss Zwisler return (unsigned long)entry >> RADIX_DAX_SHIFT; 79527b19d0SRoss Zwisler } 80527b19d0SRoss Zwisler 813fe0791cSDan Williams static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags) 82527b19d0SRoss Zwisler { 83527b19d0SRoss Zwisler return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags | 843fe0791cSDan Williams (pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK); 85527b19d0SRoss Zwisler } 86527b19d0SRoss Zwisler 87527b19d0SRoss Zwisler static unsigned int dax_radix_order(void *entry) 88527b19d0SRoss Zwisler { 89527b19d0SRoss Zwisler if ((unsigned long)entry & RADIX_DAX_PMD) 90527b19d0SRoss Zwisler return PMD_SHIFT - PAGE_SHIFT; 91527b19d0SRoss Zwisler return 0; 92527b19d0SRoss Zwisler } 93527b19d0SRoss Zwisler 94642261acSRoss Zwisler static int dax_is_pmd_entry(void *entry) 95642261acSRoss Zwisler { 96642261acSRoss Zwisler return (unsigned long)entry & RADIX_DAX_PMD; 97642261acSRoss Zwisler } 98642261acSRoss Zwisler 99642261acSRoss Zwisler static int dax_is_pte_entry(void *entry) 100642261acSRoss Zwisler { 101642261acSRoss Zwisler return !((unsigned long)entry & RADIX_DAX_PMD); 102642261acSRoss Zwisler } 103642261acSRoss Zwisler 104642261acSRoss Zwisler static int dax_is_zero_entry(void *entry) 105642261acSRoss Zwisler { 10691d25ba8SRoss Zwisler return (unsigned long)entry & RADIX_DAX_ZERO_PAGE; 107642261acSRoss Zwisler } 108642261acSRoss Zwisler 109642261acSRoss Zwisler static int dax_is_empty_entry(void *entry) 110642261acSRoss Zwisler { 111642261acSRoss Zwisler return (unsigned long)entry & RADIX_DAX_EMPTY; 112642261acSRoss Zwisler } 113642261acSRoss Zwisler 114f7ca90b1SMatthew Wilcox /* 115ac401cc7SJan Kara * DAX radix tree locking 116ac401cc7SJan Kara */ 117ac401cc7SJan Kara struct exceptional_entry_key { 118ac401cc7SJan Kara struct address_space *mapping; 11963e95b5cSRoss Zwisler pgoff_t entry_start; 120ac401cc7SJan Kara }; 121ac401cc7SJan Kara 122ac401cc7SJan Kara struct wait_exceptional_entry_queue { 123ac6424b9SIngo Molnar wait_queue_entry_t wait; 124ac401cc7SJan Kara struct exceptional_entry_key key; 125ac401cc7SJan Kara }; 126ac401cc7SJan Kara 12763e95b5cSRoss Zwisler static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping, 12863e95b5cSRoss Zwisler pgoff_t index, void *entry, struct exceptional_entry_key *key) 12963e95b5cSRoss Zwisler { 13063e95b5cSRoss Zwisler unsigned long hash; 13163e95b5cSRoss Zwisler 13263e95b5cSRoss Zwisler /* 13363e95b5cSRoss Zwisler * If 'entry' is a PMD, align the 'index' that we use for the wait 13463e95b5cSRoss Zwisler * queue to the start of that PMD. 
This ensures that all offsets in 13563e95b5cSRoss Zwisler * the range covered by the PMD map to the same bit lock. 13663e95b5cSRoss Zwisler */ 137642261acSRoss Zwisler if (dax_is_pmd_entry(entry)) 138917f3452SRoss Zwisler index &= ~PG_PMD_COLOUR; 13963e95b5cSRoss Zwisler 14063e95b5cSRoss Zwisler key->mapping = mapping; 14163e95b5cSRoss Zwisler key->entry_start = index; 14263e95b5cSRoss Zwisler 14363e95b5cSRoss Zwisler hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS); 14463e95b5cSRoss Zwisler return wait_table + hash; 14563e95b5cSRoss Zwisler } 14663e95b5cSRoss Zwisler 147ac6424b9SIngo Molnar static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode, 148ac401cc7SJan Kara int sync, void *keyp) 149ac401cc7SJan Kara { 150ac401cc7SJan Kara struct exceptional_entry_key *key = keyp; 151ac401cc7SJan Kara struct wait_exceptional_entry_queue *ewait = 152ac401cc7SJan Kara container_of(wait, struct wait_exceptional_entry_queue, wait); 153ac401cc7SJan Kara 154ac401cc7SJan Kara if (key->mapping != ewait->key.mapping || 15563e95b5cSRoss Zwisler key->entry_start != ewait->key.entry_start) 156ac401cc7SJan Kara return 0; 157ac401cc7SJan Kara return autoremove_wake_function(wait, mode, sync, NULL); 158ac401cc7SJan Kara } 159ac401cc7SJan Kara 160ac401cc7SJan Kara /* 161b93b0163SMatthew Wilcox * @entry may no longer be the entry at the index in the mapping. 162b93b0163SMatthew Wilcox * The important information it's conveying is whether the entry at 163b93b0163SMatthew Wilcox * this index used to be a PMD entry. 164e30331ffSRoss Zwisler */ 165d01ad197SRoss Zwisler static void dax_wake_mapping_entry_waiter(struct address_space *mapping, 166e30331ffSRoss Zwisler pgoff_t index, void *entry, bool wake_all) 167e30331ffSRoss Zwisler { 168e30331ffSRoss Zwisler struct exceptional_entry_key key; 169e30331ffSRoss Zwisler wait_queue_head_t *wq; 170e30331ffSRoss Zwisler 171e30331ffSRoss Zwisler wq = dax_entry_waitqueue(mapping, index, entry, &key); 172e30331ffSRoss Zwisler 173e30331ffSRoss Zwisler /* 174e30331ffSRoss Zwisler * Checking for locked entry and prepare_to_wait_exclusive() happens 175b93b0163SMatthew Wilcox * under the i_pages lock, ditto for entry handling in our callers. 176e30331ffSRoss Zwisler * So at this point all tasks that could have seen our entry locked 177e30331ffSRoss Zwisler * must be in the waitqueue and the following check will see them. 178e30331ffSRoss Zwisler */ 179e30331ffSRoss Zwisler if (waitqueue_active(wq)) 180e30331ffSRoss Zwisler __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key); 181e30331ffSRoss Zwisler } 182e30331ffSRoss Zwisler 183e30331ffSRoss Zwisler /* 184b93b0163SMatthew Wilcox * Check whether the given slot is locked. Must be called with the i_pages 185b93b0163SMatthew Wilcox * lock held. 186ac401cc7SJan Kara */ 187ac401cc7SJan Kara static inline int slot_locked(struct address_space *mapping, void **slot) 188ac401cc7SJan Kara { 189ac401cc7SJan Kara unsigned long entry = (unsigned long) 190b93b0163SMatthew Wilcox radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); 191ac401cc7SJan Kara return entry & RADIX_DAX_ENTRY_LOCK; 192ac401cc7SJan Kara } 193ac401cc7SJan Kara 194ac401cc7SJan Kara /* 195b93b0163SMatthew Wilcox * Mark the given slot as locked. Must be called with the i_pages lock held. 
196ac401cc7SJan Kara */ 197ac401cc7SJan Kara static inline void *lock_slot(struct address_space *mapping, void **slot) 198ac401cc7SJan Kara { 199ac401cc7SJan Kara unsigned long entry = (unsigned long) 200b93b0163SMatthew Wilcox radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); 201ac401cc7SJan Kara 202ac401cc7SJan Kara entry |= RADIX_DAX_ENTRY_LOCK; 203b93b0163SMatthew Wilcox radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry); 204ac401cc7SJan Kara return (void *)entry; 205ac401cc7SJan Kara } 206ac401cc7SJan Kara 207ac401cc7SJan Kara /* 208b93b0163SMatthew Wilcox * Mark the given slot as unlocked. Must be called with the i_pages lock held. 209ac401cc7SJan Kara */ 210ac401cc7SJan Kara static inline void *unlock_slot(struct address_space *mapping, void **slot) 211ac401cc7SJan Kara { 212ac401cc7SJan Kara unsigned long entry = (unsigned long) 213b93b0163SMatthew Wilcox radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); 214ac401cc7SJan Kara 215ac401cc7SJan Kara entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK; 216b93b0163SMatthew Wilcox radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry); 217ac401cc7SJan Kara return (void *)entry; 218ac401cc7SJan Kara } 219ac401cc7SJan Kara 220ac401cc7SJan Kara /* 221ac401cc7SJan Kara * Lookup entry in radix tree, wait for it to become unlocked if it is 222ac401cc7SJan Kara * exceptional entry and return it. The caller must call 223ac401cc7SJan Kara * put_unlocked_mapping_entry() when he decided not to lock the entry or 224ac401cc7SJan Kara * put_locked_mapping_entry() when he locked the entry and now wants to 225ac401cc7SJan Kara * unlock it. 226ac401cc7SJan Kara * 227b93b0163SMatthew Wilcox * Must be called with the i_pages lock held. 228ac401cc7SJan Kara */ 229ac401cc7SJan Kara static void *get_unlocked_mapping_entry(struct address_space *mapping, 230ac401cc7SJan Kara pgoff_t index, void ***slotp) 231ac401cc7SJan Kara { 232e3ad61c6SRoss Zwisler void *entry, **slot; 233ac401cc7SJan Kara struct wait_exceptional_entry_queue ewait; 23463e95b5cSRoss Zwisler wait_queue_head_t *wq; 235ac401cc7SJan Kara 236ac401cc7SJan Kara init_wait(&ewait.wait); 237ac401cc7SJan Kara ewait.wait.func = wake_exceptional_entry_func; 238ac401cc7SJan Kara 239ac401cc7SJan Kara for (;;) { 240b93b0163SMatthew Wilcox entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, 241ac401cc7SJan Kara &slot); 24291d25ba8SRoss Zwisler if (!entry || 24391d25ba8SRoss Zwisler WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) || 244ac401cc7SJan Kara !slot_locked(mapping, slot)) { 245ac401cc7SJan Kara if (slotp) 246ac401cc7SJan Kara *slotp = slot; 247e3ad61c6SRoss Zwisler return entry; 248ac401cc7SJan Kara } 24963e95b5cSRoss Zwisler 25063e95b5cSRoss Zwisler wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key); 251ac401cc7SJan Kara prepare_to_wait_exclusive(wq, &ewait.wait, 252ac401cc7SJan Kara TASK_UNINTERRUPTIBLE); 253b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 254ac401cc7SJan Kara schedule(); 255ac401cc7SJan Kara finish_wait(wq, &ewait.wait); 256b93b0163SMatthew Wilcox xa_lock_irq(&mapping->i_pages); 257ac401cc7SJan Kara } 258ac401cc7SJan Kara } 259ac401cc7SJan Kara 260b1aa812bSJan Kara static void dax_unlock_mapping_entry(struct address_space *mapping, 261b1aa812bSJan Kara pgoff_t index) 262b1aa812bSJan Kara { 263b1aa812bSJan Kara void *entry, **slot; 264b1aa812bSJan Kara 265b93b0163SMatthew Wilcox xa_lock_irq(&mapping->i_pages); 266b93b0163SMatthew Wilcox entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot); 
267b1aa812bSJan Kara if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) || 268b1aa812bSJan Kara !slot_locked(mapping, slot))) { 269b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 270b1aa812bSJan Kara return; 271b1aa812bSJan Kara } 272b1aa812bSJan Kara unlock_slot(mapping, slot); 273b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 274b1aa812bSJan Kara dax_wake_mapping_entry_waiter(mapping, index, entry, false); 275b1aa812bSJan Kara } 276b1aa812bSJan Kara 277ac401cc7SJan Kara static void put_locked_mapping_entry(struct address_space *mapping, 27891d25ba8SRoss Zwisler pgoff_t index) 279ac401cc7SJan Kara { 280bc2466e4SJan Kara dax_unlock_mapping_entry(mapping, index); 281ac401cc7SJan Kara } 282ac401cc7SJan Kara 283ac401cc7SJan Kara /* 284ac401cc7SJan Kara * Called when we are done with radix tree entry we looked up via 285ac401cc7SJan Kara * get_unlocked_mapping_entry() and which we didn't lock in the end. 286ac401cc7SJan Kara */ 287ac401cc7SJan Kara static void put_unlocked_mapping_entry(struct address_space *mapping, 288ac401cc7SJan Kara pgoff_t index, void *entry) 289ac401cc7SJan Kara { 29091d25ba8SRoss Zwisler if (!entry) 291ac401cc7SJan Kara return; 292ac401cc7SJan Kara 293ac401cc7SJan Kara /* We have to wake up next waiter for the radix tree entry lock */ 294422476c4SRoss Zwisler dax_wake_mapping_entry_waiter(mapping, index, entry, false); 295422476c4SRoss Zwisler } 296422476c4SRoss Zwisler 297d2c997c0SDan Williams static unsigned long dax_entry_size(void *entry) 298d2c997c0SDan Williams { 299d2c997c0SDan Williams if (dax_is_zero_entry(entry)) 300d2c997c0SDan Williams return 0; 301d2c997c0SDan Williams else if (dax_is_empty_entry(entry)) 302d2c997c0SDan Williams return 0; 303d2c997c0SDan Williams else if (dax_is_pmd_entry(entry)) 304d2c997c0SDan Williams return PMD_SIZE; 305d2c997c0SDan Williams else 306d2c997c0SDan Williams return PAGE_SIZE; 307d2c997c0SDan Williams } 308d2c997c0SDan Williams 309d2c997c0SDan Williams static unsigned long dax_radix_end_pfn(void *entry) 310d2c997c0SDan Williams { 311d2c997c0SDan Williams return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE; 312d2c997c0SDan Williams } 313d2c997c0SDan Williams 314d2c997c0SDan Williams /* 315d2c997c0SDan Williams * Iterate through all mapped pfns represented by an entry, i.e. skip 316d2c997c0SDan Williams * 'empty' and 'zero' entries. 
317d2c997c0SDan Williams */ 318d2c997c0SDan Williams #define for_each_mapped_pfn(entry, pfn) \ 319d2c997c0SDan Williams for (pfn = dax_radix_pfn(entry); \ 320d2c997c0SDan Williams pfn < dax_radix_end_pfn(entry); pfn++) 321d2c997c0SDan Williams 322d2c997c0SDan Williams static void dax_associate_entry(void *entry, struct address_space *mapping) 323d2c997c0SDan Williams { 324d2c997c0SDan Williams unsigned long pfn; 325d2c997c0SDan Williams 326d2c997c0SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 327d2c997c0SDan Williams return; 328d2c997c0SDan Williams 329d2c997c0SDan Williams for_each_mapped_pfn(entry, pfn) { 330d2c997c0SDan Williams struct page *page = pfn_to_page(pfn); 331d2c997c0SDan Williams 332d2c997c0SDan Williams WARN_ON_ONCE(page->mapping); 333d2c997c0SDan Williams page->mapping = mapping; 334d2c997c0SDan Williams } 335d2c997c0SDan Williams } 336d2c997c0SDan Williams 337d2c997c0SDan Williams static void dax_disassociate_entry(void *entry, struct address_space *mapping, 338d2c997c0SDan Williams bool trunc) 339d2c997c0SDan Williams { 340d2c997c0SDan Williams unsigned long pfn; 341d2c997c0SDan Williams 342d2c997c0SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 343d2c997c0SDan Williams return; 344d2c997c0SDan Williams 345d2c997c0SDan Williams for_each_mapped_pfn(entry, pfn) { 346d2c997c0SDan Williams struct page *page = pfn_to_page(pfn); 347d2c997c0SDan Williams 348d2c997c0SDan Williams WARN_ON_ONCE(trunc && page_ref_count(page) > 1); 349d2c997c0SDan Williams WARN_ON_ONCE(page->mapping && page->mapping != mapping); 350d2c997c0SDan Williams page->mapping = NULL; 351d2c997c0SDan Williams } 352d2c997c0SDan Williams } 353d2c997c0SDan Williams 3545fac7408SDan Williams static struct page *dax_busy_page(void *entry) 3555fac7408SDan Williams { 3565fac7408SDan Williams unsigned long pfn; 3575fac7408SDan Williams 3585fac7408SDan Williams for_each_mapped_pfn(entry, pfn) { 3595fac7408SDan Williams struct page *page = pfn_to_page(pfn); 3605fac7408SDan Williams 3615fac7408SDan Williams if (page_ref_count(page) > 1) 3625fac7408SDan Williams return page; 3635fac7408SDan Williams } 3645fac7408SDan Williams return NULL; 3655fac7408SDan Williams } 3665fac7408SDan Williams 367ac401cc7SJan Kara /* 36891d25ba8SRoss Zwisler * Find radix tree entry at given index. If it points to an exceptional entry, 36991d25ba8SRoss Zwisler * return it with the radix tree entry locked. If the radix tree doesn't 37091d25ba8SRoss Zwisler * contain given index, create an empty exceptional entry for the index and 37191d25ba8SRoss Zwisler * return with it locked. 372ac401cc7SJan Kara * 373642261acSRoss Zwisler * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will 374642261acSRoss Zwisler * either return that locked entry or will return an error. This error will 37591d25ba8SRoss Zwisler * happen if there are any 4k entries within the 2MiB range that we are 37691d25ba8SRoss Zwisler * requesting. 377642261acSRoss Zwisler * 378642261acSRoss Zwisler * We always favor 4k entries over 2MiB entries. There isn't a flow where we 379642261acSRoss Zwisler * evict 4k entries in order to 'upgrade' them to a 2MiB entry. A 2MiB 380642261acSRoss Zwisler * insertion will fail if it finds any 4k entries already in the tree, and a 381642261acSRoss Zwisler * 4k insertion will cause an existing 2MiB entry to be unmapped and 382642261acSRoss Zwisler * downgraded to 4k entries. This happens for both 2MiB huge zero pages as 383642261acSRoss Zwisler * well as 2MiB empty entries. 
384642261acSRoss Zwisler * 385642261acSRoss Zwisler * The exception to this downgrade path is for 2MiB DAX PMD entries that have 386642261acSRoss Zwisler * real storage backing them. We will leave these real 2MiB DAX entries in 387642261acSRoss Zwisler * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry. 388642261acSRoss Zwisler * 389ac401cc7SJan Kara * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For 390ac401cc7SJan Kara * persistent memory the benefit is doubtful. We can add that later if we can 391ac401cc7SJan Kara * show it helps. 392ac401cc7SJan Kara */ 393642261acSRoss Zwisler static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index, 394642261acSRoss Zwisler unsigned long size_flag) 395ac401cc7SJan Kara { 396642261acSRoss Zwisler bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */ 397e3ad61c6SRoss Zwisler void *entry, **slot; 398ac401cc7SJan Kara 399ac401cc7SJan Kara restart: 400b93b0163SMatthew Wilcox xa_lock_irq(&mapping->i_pages); 401e3ad61c6SRoss Zwisler entry = get_unlocked_mapping_entry(mapping, index, &slot); 402642261acSRoss Zwisler 40391d25ba8SRoss Zwisler if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) { 40491d25ba8SRoss Zwisler entry = ERR_PTR(-EIO); 40591d25ba8SRoss Zwisler goto out_unlock; 40691d25ba8SRoss Zwisler } 40791d25ba8SRoss Zwisler 408642261acSRoss Zwisler if (entry) { 409642261acSRoss Zwisler if (size_flag & RADIX_DAX_PMD) { 41091d25ba8SRoss Zwisler if (dax_is_pte_entry(entry)) { 411642261acSRoss Zwisler put_unlocked_mapping_entry(mapping, index, 412642261acSRoss Zwisler entry); 413642261acSRoss Zwisler entry = ERR_PTR(-EEXIST); 414642261acSRoss Zwisler goto out_unlock; 415642261acSRoss Zwisler } 416642261acSRoss Zwisler } else { /* trying to grab a PTE entry */ 41791d25ba8SRoss Zwisler if (dax_is_pmd_entry(entry) && 418642261acSRoss Zwisler (dax_is_zero_entry(entry) || 419642261acSRoss Zwisler dax_is_empty_entry(entry))) { 420642261acSRoss Zwisler pmd_downgrade = true; 421642261acSRoss Zwisler } 422642261acSRoss Zwisler } 423642261acSRoss Zwisler } 424642261acSRoss Zwisler 425ac401cc7SJan Kara /* No entry for given index? Make sure radix tree is big enough. */ 426642261acSRoss Zwisler if (!entry || pmd_downgrade) { 427ac401cc7SJan Kara int err; 428ac401cc7SJan Kara 429642261acSRoss Zwisler if (pmd_downgrade) { 430642261acSRoss Zwisler /* 431642261acSRoss Zwisler * Make sure 'entry' remains valid while we drop 432b93b0163SMatthew Wilcox * the i_pages lock. 433642261acSRoss Zwisler */ 434642261acSRoss Zwisler entry = lock_slot(mapping, slot); 435642261acSRoss Zwisler } 436642261acSRoss Zwisler 437b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 438642261acSRoss Zwisler /* 439642261acSRoss Zwisler * Besides huge zero pages the only other thing that gets 440642261acSRoss Zwisler * downgraded are empty entries which don't need to be 441642261acSRoss Zwisler * unmapped. 
442642261acSRoss Zwisler */ 443642261acSRoss Zwisler if (pmd_downgrade && dax_is_zero_entry(entry)) 444977fbdcdSMatthew Wilcox unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, 445977fbdcdSMatthew Wilcox PG_PMD_NR, false); 446642261acSRoss Zwisler 4470cb80b48SJan Kara err = radix_tree_preload( 4480cb80b48SJan Kara mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM); 4490cb80b48SJan Kara if (err) { 4500cb80b48SJan Kara if (pmd_downgrade) 45191d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, index); 4520cb80b48SJan Kara return ERR_PTR(err); 4530cb80b48SJan Kara } 454b93b0163SMatthew Wilcox xa_lock_irq(&mapping->i_pages); 455642261acSRoss Zwisler 456e11f8b7bSRoss Zwisler if (!entry) { 457e11f8b7bSRoss Zwisler /* 458b93b0163SMatthew Wilcox * We needed to drop the i_pages lock while calling 459e11f8b7bSRoss Zwisler * radix_tree_preload() and we didn't have an entry to 460e11f8b7bSRoss Zwisler * lock. See if another thread inserted an entry at 461e11f8b7bSRoss Zwisler * our index during this time. 462e11f8b7bSRoss Zwisler */ 463b93b0163SMatthew Wilcox entry = __radix_tree_lookup(&mapping->i_pages, index, 464e11f8b7bSRoss Zwisler NULL, &slot); 465e11f8b7bSRoss Zwisler if (entry) { 466e11f8b7bSRoss Zwisler radix_tree_preload_end(); 467b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 468e11f8b7bSRoss Zwisler goto restart; 469e11f8b7bSRoss Zwisler } 470e11f8b7bSRoss Zwisler } 471e11f8b7bSRoss Zwisler 472642261acSRoss Zwisler if (pmd_downgrade) { 473d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, false); 474b93b0163SMatthew Wilcox radix_tree_delete(&mapping->i_pages, index); 475642261acSRoss Zwisler mapping->nrexceptional--; 476642261acSRoss Zwisler dax_wake_mapping_entry_waiter(mapping, index, entry, 477642261acSRoss Zwisler true); 478642261acSRoss Zwisler } 479642261acSRoss Zwisler 480642261acSRoss Zwisler entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY); 481642261acSRoss Zwisler 482b93b0163SMatthew Wilcox err = __radix_tree_insert(&mapping->i_pages, index, 483642261acSRoss Zwisler dax_radix_order(entry), entry); 484ac401cc7SJan Kara radix_tree_preload_end(); 485ac401cc7SJan Kara if (err) { 486b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 487642261acSRoss Zwisler /* 488e11f8b7bSRoss Zwisler * Our insertion of a DAX entry failed, most likely 489e11f8b7bSRoss Zwisler * because we were inserting a PMD entry and it 490e11f8b7bSRoss Zwisler * collided with a PTE sized entry at a different 491e11f8b7bSRoss Zwisler * index in the PMD range. We haven't inserted 492e11f8b7bSRoss Zwisler * anything into the radix tree and have no waiters to 493e11f8b7bSRoss Zwisler * wake. 494642261acSRoss Zwisler */ 495ac401cc7SJan Kara return ERR_PTR(err); 496ac401cc7SJan Kara } 497ac401cc7SJan Kara /* Good, we have inserted empty locked entry into the tree. */ 498ac401cc7SJan Kara mapping->nrexceptional++; 499b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 500e3ad61c6SRoss Zwisler return entry; 501ac401cc7SJan Kara } 502e3ad61c6SRoss Zwisler entry = lock_slot(mapping, slot); 503642261acSRoss Zwisler out_unlock: 504b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 505e3ad61c6SRoss Zwisler return entry; 506ac401cc7SJan Kara } 507ac401cc7SJan Kara 5085fac7408SDan Williams /** 5095fac7408SDan Williams * dax_layout_busy_page - find first pinned page in @mapping 5105fac7408SDan Williams * @mapping: address space to scan for a page with ref count > 1 5115fac7408SDan Williams * 5125fac7408SDan Williams * DAX requires ZONE_DEVICE mapped pages. 
These pages are never 5135fac7408SDan Williams * 'onlined' to the page allocator so they are considered idle when 5145fac7408SDan Williams * page->count == 1. A filesystem uses this interface to determine if 5155fac7408SDan Williams * any page in the mapping is busy, i.e. for DMA, or other 5165fac7408SDan Williams * get_user_pages() usages. 5175fac7408SDan Williams * 5185fac7408SDan Williams * It is expected that the filesystem is holding locks to block the 5195fac7408SDan Williams * establishment of new mappings in this address_space. I.e. it expects 5205fac7408SDan Williams * to be able to run unmap_mapping_range() and subsequently not race 5215fac7408SDan Williams * mapping_mapped() becoming true. 5225fac7408SDan Williams */ 5235fac7408SDan Williams struct page *dax_layout_busy_page(struct address_space *mapping) 5245fac7408SDan Williams { 5255fac7408SDan Williams pgoff_t indices[PAGEVEC_SIZE]; 5265fac7408SDan Williams struct page *page = NULL; 5275fac7408SDan Williams struct pagevec pvec; 5285fac7408SDan Williams pgoff_t index, end; 5295fac7408SDan Williams unsigned i; 5305fac7408SDan Williams 5315fac7408SDan Williams /* 5325fac7408SDan Williams * In the 'limited' case get_user_pages() for dax is disabled. 5335fac7408SDan Williams */ 5345fac7408SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 5355fac7408SDan Williams return NULL; 5365fac7408SDan Williams 5375fac7408SDan Williams if (!dax_mapping(mapping) || !mapping_mapped(mapping)) 5385fac7408SDan Williams return NULL; 5395fac7408SDan Williams 5405fac7408SDan Williams pagevec_init(&pvec); 5415fac7408SDan Williams index = 0; 5425fac7408SDan Williams end = -1; 5435fac7408SDan Williams 5445fac7408SDan Williams /* 5455fac7408SDan Williams * If we race get_user_pages_fast() here either we'll see the 5465fac7408SDan Williams * elevated page count in the pagevec_lookup and wait, or 5475fac7408SDan Williams * get_user_pages_fast() will see that the page it took a reference 5485fac7408SDan Williams * against is no longer mapped in the page tables and bail to the 5495fac7408SDan Williams * get_user_pages() slow path. The slow path is protected by 5505fac7408SDan Williams * pte_lock() and pmd_lock(). New references are not taken without 5515fac7408SDan Williams * holding those locks, and unmap_mapping_range() will not zero the 5525fac7408SDan Williams * pte or pmd without holding the respective lock, so we are 5535fac7408SDan Williams * guaranteed to either see new references or prevent new 5545fac7408SDan Williams * references from being established. 
5555fac7408SDan Williams */ 5565fac7408SDan Williams unmap_mapping_range(mapping, 0, 0, 1); 5575fac7408SDan Williams 5585fac7408SDan Williams while (index < end && pagevec_lookup_entries(&pvec, mapping, index, 5595fac7408SDan Williams min(end - index, (pgoff_t)PAGEVEC_SIZE), 5605fac7408SDan Williams indices)) { 5615fac7408SDan Williams for (i = 0; i < pagevec_count(&pvec); i++) { 5625fac7408SDan Williams struct page *pvec_ent = pvec.pages[i]; 5635fac7408SDan Williams void *entry; 5645fac7408SDan Williams 5655fac7408SDan Williams index = indices[i]; 5665fac7408SDan Williams if (index >= end) 5675fac7408SDan Williams break; 5685fac7408SDan Williams 569cdbf8897SRoss Zwisler if (WARN_ON_ONCE( 570cdbf8897SRoss Zwisler !radix_tree_exceptional_entry(pvec_ent))) 5715fac7408SDan Williams continue; 5725fac7408SDan Williams 5735fac7408SDan Williams xa_lock_irq(&mapping->i_pages); 5745fac7408SDan Williams entry = get_unlocked_mapping_entry(mapping, index, NULL); 5755fac7408SDan Williams if (entry) 5765fac7408SDan Williams page = dax_busy_page(entry); 5775fac7408SDan Williams put_unlocked_mapping_entry(mapping, index, entry); 5785fac7408SDan Williams xa_unlock_irq(&mapping->i_pages); 5795fac7408SDan Williams if (page) 5805fac7408SDan Williams break; 5815fac7408SDan Williams } 582cdbf8897SRoss Zwisler 583cdbf8897SRoss Zwisler /* 584cdbf8897SRoss Zwisler * We don't expect normal struct page entries to exist in our 585cdbf8897SRoss Zwisler * tree, but we keep these pagevec calls so that this code is 586cdbf8897SRoss Zwisler * consistent with the common pattern for handling pagevecs 587cdbf8897SRoss Zwisler * throughout the kernel. 588cdbf8897SRoss Zwisler */ 5895fac7408SDan Williams pagevec_remove_exceptionals(&pvec); 5905fac7408SDan Williams pagevec_release(&pvec); 5915fac7408SDan Williams index++; 5925fac7408SDan Williams 5935fac7408SDan Williams if (page) 5945fac7408SDan Williams break; 5955fac7408SDan Williams } 5965fac7408SDan Williams return page; 5975fac7408SDan Williams } 5985fac7408SDan Williams EXPORT_SYMBOL_GPL(dax_layout_busy_page); 5995fac7408SDan Williams 600c6dcf52cSJan Kara static int __dax_invalidate_mapping_entry(struct address_space *mapping, 601c6dcf52cSJan Kara pgoff_t index, bool trunc) 602c6dcf52cSJan Kara { 603c6dcf52cSJan Kara int ret = 0; 604c6dcf52cSJan Kara void *entry; 605b93b0163SMatthew Wilcox struct radix_tree_root *pages = &mapping->i_pages; 606c6dcf52cSJan Kara 607b93b0163SMatthew Wilcox xa_lock_irq(pages); 608c6dcf52cSJan Kara entry = get_unlocked_mapping_entry(mapping, index, NULL); 60991d25ba8SRoss Zwisler if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry))) 610c6dcf52cSJan Kara goto out; 611c6dcf52cSJan Kara if (!trunc && 612b93b0163SMatthew Wilcox (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) || 613b93b0163SMatthew Wilcox radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))) 614c6dcf52cSJan Kara goto out; 615d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, trunc); 616b93b0163SMatthew Wilcox radix_tree_delete(pages, index); 617c6dcf52cSJan Kara mapping->nrexceptional--; 618c6dcf52cSJan Kara ret = 1; 619c6dcf52cSJan Kara out: 620c6dcf52cSJan Kara put_unlocked_mapping_entry(mapping, index, entry); 621b93b0163SMatthew Wilcox xa_unlock_irq(pages); 622c6dcf52cSJan Kara return ret; 623c6dcf52cSJan Kara } 624ac401cc7SJan Kara /* 625ac401cc7SJan Kara * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree 626ac401cc7SJan Kara * entry to get unlocked before deleting it. 
627ac401cc7SJan Kara */ 628ac401cc7SJan Kara int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) 629ac401cc7SJan Kara { 630c6dcf52cSJan Kara int ret = __dax_invalidate_mapping_entry(mapping, index, true); 631ac401cc7SJan Kara 632ac401cc7SJan Kara /* 633ac401cc7SJan Kara * This gets called from truncate / punch_hole path. As such, the caller 634ac401cc7SJan Kara * must hold locks protecting against concurrent modifications of the 635ac401cc7SJan Kara * radix tree (usually fs-private i_mmap_sem for writing). Since the 636ac401cc7SJan Kara * caller has seen exceptional entry for this index, we better find it 637ac401cc7SJan Kara * at that index as well... 638ac401cc7SJan Kara */ 639c6dcf52cSJan Kara WARN_ON_ONCE(!ret); 640c6dcf52cSJan Kara return ret; 641ac401cc7SJan Kara } 642ac401cc7SJan Kara 643c6dcf52cSJan Kara /* 644c6dcf52cSJan Kara * Invalidate exceptional DAX entry if it is clean. 645c6dcf52cSJan Kara */ 646c6dcf52cSJan Kara int dax_invalidate_mapping_entry_sync(struct address_space *mapping, 647c6dcf52cSJan Kara pgoff_t index) 648c6dcf52cSJan Kara { 649c6dcf52cSJan Kara return __dax_invalidate_mapping_entry(mapping, index, false); 650ac401cc7SJan Kara } 651ac401cc7SJan Kara 652cccbce67SDan Williams static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev, 653cccbce67SDan Williams sector_t sector, size_t size, struct page *to, 654cccbce67SDan Williams unsigned long vaddr) 655f7ca90b1SMatthew Wilcox { 656cccbce67SDan Williams void *vto, *kaddr; 657cccbce67SDan Williams pgoff_t pgoff; 658cccbce67SDan Williams pfn_t pfn; 659cccbce67SDan Williams long rc; 660cccbce67SDan Williams int id; 661e2e05394SRoss Zwisler 662cccbce67SDan Williams rc = bdev_dax_pgoff(bdev, sector, size, &pgoff); 663cccbce67SDan Williams if (rc) 664cccbce67SDan Williams return rc; 665cccbce67SDan Williams 666cccbce67SDan Williams id = dax_read_lock(); 667cccbce67SDan Williams rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn); 668cccbce67SDan Williams if (rc < 0) { 669cccbce67SDan Williams dax_read_unlock(id); 670cccbce67SDan Williams return rc; 671cccbce67SDan Williams } 672f7ca90b1SMatthew Wilcox vto = kmap_atomic(to); 673cccbce67SDan Williams copy_user_page(vto, (void __force *)kaddr, vaddr, to); 674f7ca90b1SMatthew Wilcox kunmap_atomic(vto); 675cccbce67SDan Williams dax_read_unlock(id); 676f7ca90b1SMatthew Wilcox return 0; 677f7ca90b1SMatthew Wilcox } 678f7ca90b1SMatthew Wilcox 679642261acSRoss Zwisler /* 680642261acSRoss Zwisler * By this point grab_mapping_entry() has ensured that we have a locked entry 681642261acSRoss Zwisler * of the appropriate size so we don't have to worry about downgrading PMDs to 682642261acSRoss Zwisler * PTEs. If we happen to be trying to insert a PTE and there is a PMD 683642261acSRoss Zwisler * already in the tree, we will skip the insertion and just dirty the PMD as 684642261acSRoss Zwisler * appropriate. 
685642261acSRoss Zwisler */ 686ac401cc7SJan Kara static void *dax_insert_mapping_entry(struct address_space *mapping, 687ac401cc7SJan Kara struct vm_fault *vmf, 6883fe0791cSDan Williams void *entry, pfn_t pfn_t, 689f5b7b748SJan Kara unsigned long flags, bool dirty) 6909973c98eSRoss Zwisler { 691b93b0163SMatthew Wilcox struct radix_tree_root *pages = &mapping->i_pages; 6923fe0791cSDan Williams unsigned long pfn = pfn_t_to_pfn(pfn_t); 693ac401cc7SJan Kara pgoff_t index = vmf->pgoff; 6943fe0791cSDan Williams void *new_entry; 6959973c98eSRoss Zwisler 696f5b7b748SJan Kara if (dirty) 6979973c98eSRoss Zwisler __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 6989973c98eSRoss Zwisler 69991d25ba8SRoss Zwisler if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) { 70091d25ba8SRoss Zwisler /* we are replacing a zero page with block mapping */ 70191d25ba8SRoss Zwisler if (dax_is_pmd_entry(entry)) 702977fbdcdSMatthew Wilcox unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, 703977fbdcdSMatthew Wilcox PG_PMD_NR, false); 70491d25ba8SRoss Zwisler else /* pte entry */ 705977fbdcdSMatthew Wilcox unmap_mapping_pages(mapping, vmf->pgoff, 1, false); 706ac401cc7SJan Kara } 7079973c98eSRoss Zwisler 708b93b0163SMatthew Wilcox xa_lock_irq(pages); 7093fe0791cSDan Williams new_entry = dax_radix_locked_entry(pfn, flags); 710d2c997c0SDan Williams if (dax_entry_size(entry) != dax_entry_size(new_entry)) { 711d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, false); 712d2c997c0SDan Williams dax_associate_entry(new_entry, mapping); 713d2c997c0SDan Williams } 714642261acSRoss Zwisler 71591d25ba8SRoss Zwisler if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { 716642261acSRoss Zwisler /* 717642261acSRoss Zwisler * Only swap our new entry into the radix tree if the current 718642261acSRoss Zwisler * entry is a zero page or an empty entry. If a normal PTE or 719642261acSRoss Zwisler * PMD entry is already in the tree, we leave it alone. This 720642261acSRoss Zwisler * means that if we are trying to insert a PTE and the 721642261acSRoss Zwisler * existing entry is a PMD, we will just leave the PMD in the 722642261acSRoss Zwisler * tree and dirty it if necessary. 
723642261acSRoss Zwisler */ 724f7942430SJohannes Weiner struct radix_tree_node *node; 725ac401cc7SJan Kara void **slot; 726ac401cc7SJan Kara void *ret; 727ac401cc7SJan Kara 728b93b0163SMatthew Wilcox ret = __radix_tree_lookup(pages, index, &node, &slot); 729ac401cc7SJan Kara WARN_ON_ONCE(ret != entry); 730b93b0163SMatthew Wilcox __radix_tree_replace(pages, node, slot, 731c7df8ad2SMel Gorman new_entry, NULL); 73291d25ba8SRoss Zwisler entry = new_entry; 733ac401cc7SJan Kara } 73491d25ba8SRoss Zwisler 735f5b7b748SJan Kara if (dirty) 736b93b0163SMatthew Wilcox radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY); 73791d25ba8SRoss Zwisler 738b93b0163SMatthew Wilcox xa_unlock_irq(pages); 73991d25ba8SRoss Zwisler return entry; 7409973c98eSRoss Zwisler } 7419973c98eSRoss Zwisler 7424b4bb46dSJan Kara static inline unsigned long 7434b4bb46dSJan Kara pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma) 7444b4bb46dSJan Kara { 7454b4bb46dSJan Kara unsigned long address; 7464b4bb46dSJan Kara 7474b4bb46dSJan Kara address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 7484b4bb46dSJan Kara VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); 7494b4bb46dSJan Kara return address; 7504b4bb46dSJan Kara } 7514b4bb46dSJan Kara 7524b4bb46dSJan Kara /* Walk all mappings of a given index of a file and writeprotect them */ 7534b4bb46dSJan Kara static void dax_mapping_entry_mkclean(struct address_space *mapping, 7544b4bb46dSJan Kara pgoff_t index, unsigned long pfn) 7554b4bb46dSJan Kara { 7564b4bb46dSJan Kara struct vm_area_struct *vma; 757f729c8c9SRoss Zwisler pte_t pte, *ptep = NULL; 758f729c8c9SRoss Zwisler pmd_t *pmdp = NULL; 7594b4bb46dSJan Kara spinlock_t *ptl; 7604b4bb46dSJan Kara 7614b4bb46dSJan Kara i_mmap_lock_read(mapping); 7624b4bb46dSJan Kara vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { 763a4d1a885SJérôme Glisse unsigned long address, start, end; 7644b4bb46dSJan Kara 7654b4bb46dSJan Kara cond_resched(); 7664b4bb46dSJan Kara 7674b4bb46dSJan Kara if (!(vma->vm_flags & VM_SHARED)) 7684b4bb46dSJan Kara continue; 7694b4bb46dSJan Kara 7704b4bb46dSJan Kara address = pgoff_address(index, vma); 771a4d1a885SJérôme Glisse 772a4d1a885SJérôme Glisse /* 773a4d1a885SJérôme Glisse * Note because we provide start/end to follow_pte_pmd it will 774a4d1a885SJérôme Glisse * call mmu_notifier_invalidate_range_start() on our behalf 775a4d1a885SJérôme Glisse * before taking any lock. 776a4d1a885SJérôme Glisse */ 777a4d1a885SJérôme Glisse if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl)) 7784b4bb46dSJan Kara continue; 779f729c8c9SRoss Zwisler 7800f10851eSJérôme Glisse /* 7810f10851eSJérôme Glisse * No need to call mmu_notifier_invalidate_range() as we are 7820f10851eSJérôme Glisse * downgrading page table protection not changing it to point 7830f10851eSJérôme Glisse * to a new page. 
7840f10851eSJérôme Glisse * 785ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 7860f10851eSJérôme Glisse */ 787f729c8c9SRoss Zwisler if (pmdp) { 788f729c8c9SRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 789f729c8c9SRoss Zwisler pmd_t pmd; 790f729c8c9SRoss Zwisler 791f729c8c9SRoss Zwisler if (pfn != pmd_pfn(*pmdp)) 792f729c8c9SRoss Zwisler goto unlock_pmd; 793f6f37321SLinus Torvalds if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp)) 794f729c8c9SRoss Zwisler goto unlock_pmd; 795f729c8c9SRoss Zwisler 796f729c8c9SRoss Zwisler flush_cache_page(vma, address, pfn); 797f729c8c9SRoss Zwisler pmd = pmdp_huge_clear_flush(vma, address, pmdp); 798f729c8c9SRoss Zwisler pmd = pmd_wrprotect(pmd); 799f729c8c9SRoss Zwisler pmd = pmd_mkclean(pmd); 800f729c8c9SRoss Zwisler set_pmd_at(vma->vm_mm, address, pmdp, pmd); 801f729c8c9SRoss Zwisler unlock_pmd: 802f729c8c9SRoss Zwisler #endif 803ee190ca6SJan H. Schönherr spin_unlock(ptl); 804f729c8c9SRoss Zwisler } else { 8054b4bb46dSJan Kara if (pfn != pte_pfn(*ptep)) 806f729c8c9SRoss Zwisler goto unlock_pte; 8074b4bb46dSJan Kara if (!pte_dirty(*ptep) && !pte_write(*ptep)) 808f729c8c9SRoss Zwisler goto unlock_pte; 8094b4bb46dSJan Kara 8104b4bb46dSJan Kara flush_cache_page(vma, address, pfn); 8114b4bb46dSJan Kara pte = ptep_clear_flush(vma, address, ptep); 8124b4bb46dSJan Kara pte = pte_wrprotect(pte); 8134b4bb46dSJan Kara pte = pte_mkclean(pte); 8144b4bb46dSJan Kara set_pte_at(vma->vm_mm, address, ptep, pte); 815f729c8c9SRoss Zwisler unlock_pte: 8164b4bb46dSJan Kara pte_unmap_unlock(ptep, ptl); 817f729c8c9SRoss Zwisler } 8184b4bb46dSJan Kara 819a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); 8204b4bb46dSJan Kara } 8214b4bb46dSJan Kara i_mmap_unlock_read(mapping); 8224b4bb46dSJan Kara } 8234b4bb46dSJan Kara 8243fe0791cSDan Williams static int dax_writeback_one(struct dax_device *dax_dev, 8253fe0791cSDan Williams struct address_space *mapping, pgoff_t index, void *entry) 8269973c98eSRoss Zwisler { 827b93b0163SMatthew Wilcox struct radix_tree_root *pages = &mapping->i_pages; 8283fe0791cSDan Williams void *entry2, **slot; 8293fe0791cSDan Williams unsigned long pfn; 8303fe0791cSDan Williams long ret = 0; 831cccbce67SDan Williams size_t size; 8329973c98eSRoss Zwisler 8339973c98eSRoss Zwisler /* 834a6abc2c0SJan Kara * A page got tagged dirty in DAX mapping? Something is seriously 835a6abc2c0SJan Kara * wrong. 8369973c98eSRoss Zwisler */ 837a6abc2c0SJan Kara if (WARN_ON(!radix_tree_exceptional_entry(entry))) 838a6abc2c0SJan Kara return -EIO; 8399973c98eSRoss Zwisler 840b93b0163SMatthew Wilcox xa_lock_irq(pages); 841a6abc2c0SJan Kara entry2 = get_unlocked_mapping_entry(mapping, index, &slot); 842a6abc2c0SJan Kara /* Entry got punched out / reallocated? */ 84391d25ba8SRoss Zwisler if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2))) 844a6abc2c0SJan Kara goto put_unlocked; 845a6abc2c0SJan Kara /* 846a6abc2c0SJan Kara * Entry got reallocated elsewhere? No need to writeback. We have to 8473fe0791cSDan Williams * compare pfns as we must not bail out due to difference in lockbit 848a6abc2c0SJan Kara * or entry type. 
849a6abc2c0SJan Kara */ 8503fe0791cSDan Williams if (dax_radix_pfn(entry2) != dax_radix_pfn(entry)) 851a6abc2c0SJan Kara goto put_unlocked; 852642261acSRoss Zwisler if (WARN_ON_ONCE(dax_is_empty_entry(entry) || 853642261acSRoss Zwisler dax_is_zero_entry(entry))) { 8549973c98eSRoss Zwisler ret = -EIO; 855a6abc2c0SJan Kara goto put_unlocked; 8569973c98eSRoss Zwisler } 8579973c98eSRoss Zwisler 858a6abc2c0SJan Kara /* Another fsync thread may have already written back this entry */ 859b93b0163SMatthew Wilcox if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)) 860a6abc2c0SJan Kara goto put_unlocked; 861a6abc2c0SJan Kara /* Lock the entry to serialize with page faults */ 862a6abc2c0SJan Kara entry = lock_slot(mapping, slot); 863a6abc2c0SJan Kara /* 864a6abc2c0SJan Kara * We can clear the tag now but we have to be careful so that concurrent 865a6abc2c0SJan Kara * dax_writeback_one() calls for the same index cannot finish before we 866a6abc2c0SJan Kara * actually flush the caches. This is achieved as the calls will look 867b93b0163SMatthew Wilcox * at the entry only under the i_pages lock and once they do that 868b93b0163SMatthew Wilcox * they will see the entry locked and wait for it to unlock. 869a6abc2c0SJan Kara */ 870b93b0163SMatthew Wilcox radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE); 871b93b0163SMatthew Wilcox xa_unlock_irq(pages); 872a6abc2c0SJan Kara 873642261acSRoss Zwisler /* 874642261acSRoss Zwisler * Even if dax_writeback_mapping_range() was given a wbc->range_start 875642261acSRoss Zwisler * in the middle of a PMD, the 'index' we are given will be aligned to 8763fe0791cSDan Williams * the start index of the PMD, as will the pfn we pull from 'entry'. 8773fe0791cSDan Williams * This allows us to flush for PMD_SIZE and not have to worry about 8783fe0791cSDan Williams * partial PMD writebacks. 879642261acSRoss Zwisler */ 8803fe0791cSDan Williams pfn = dax_radix_pfn(entry); 881cccbce67SDan Williams size = PAGE_SIZE << dax_radix_order(entry); 882cccbce67SDan Williams 8833fe0791cSDan Williams dax_mapping_entry_mkclean(mapping, index, pfn); 8843fe0791cSDan Williams dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size); 8854b4bb46dSJan Kara /* 8864b4bb46dSJan Kara * After we have flushed the cache, we can clear the dirty tag. There 8874b4bb46dSJan Kara * cannot be new dirty data in the pfn after the flush has completed as 8884b4bb46dSJan Kara * the pfn mappings are writeprotected and fault waits for mapping 8894b4bb46dSJan Kara * entry lock. 8904b4bb46dSJan Kara */ 891b93b0163SMatthew Wilcox xa_lock_irq(pages); 892b93b0163SMatthew Wilcox radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY); 893b93b0163SMatthew Wilcox xa_unlock_irq(pages); 894f9bc3a07SRoss Zwisler trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT); 89591d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, index); 8969973c98eSRoss Zwisler return ret; 8979973c98eSRoss Zwisler 898a6abc2c0SJan Kara put_unlocked: 899a6abc2c0SJan Kara put_unlocked_mapping_entry(mapping, index, entry2); 900b93b0163SMatthew Wilcox xa_unlock_irq(pages); 9019973c98eSRoss Zwisler return ret; 9029973c98eSRoss Zwisler } 9039973c98eSRoss Zwisler 9049973c98eSRoss Zwisler /* 9059973c98eSRoss Zwisler * Flush the mapping to the persistent domain within the byte range of [start, 9069973c98eSRoss Zwisler * end]. This is required by data integrity operations to ensure file data is 9079973c98eSRoss Zwisler * on persistent storage prior to completion of the operation. 
9089973c98eSRoss Zwisler */ 9097f6d5b52SRoss Zwisler int dax_writeback_mapping_range(struct address_space *mapping, 9107f6d5b52SRoss Zwisler struct block_device *bdev, struct writeback_control *wbc) 9119973c98eSRoss Zwisler { 9129973c98eSRoss Zwisler struct inode *inode = mapping->host; 913642261acSRoss Zwisler pgoff_t start_index, end_index; 9149973c98eSRoss Zwisler pgoff_t indices[PAGEVEC_SIZE]; 915cccbce67SDan Williams struct dax_device *dax_dev; 9169973c98eSRoss Zwisler struct pagevec pvec; 9179973c98eSRoss Zwisler bool done = false; 9189973c98eSRoss Zwisler int i, ret = 0; 9199973c98eSRoss Zwisler 9209973c98eSRoss Zwisler if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) 9219973c98eSRoss Zwisler return -EIO; 9229973c98eSRoss Zwisler 9237f6d5b52SRoss Zwisler if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) 9247f6d5b52SRoss Zwisler return 0; 9257f6d5b52SRoss Zwisler 926cccbce67SDan Williams dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 927cccbce67SDan Williams if (!dax_dev) 928cccbce67SDan Williams return -EIO; 929cccbce67SDan Williams 93009cbfeafSKirill A. Shutemov start_index = wbc->range_start >> PAGE_SHIFT; 93109cbfeafSKirill A. Shutemov end_index = wbc->range_end >> PAGE_SHIFT; 9329973c98eSRoss Zwisler 933d14a3f48SRoss Zwisler trace_dax_writeback_range(inode, start_index, end_index); 934d14a3f48SRoss Zwisler 9359973c98eSRoss Zwisler tag_pages_for_writeback(mapping, start_index, end_index); 9369973c98eSRoss Zwisler 93786679820SMel Gorman pagevec_init(&pvec); 9389973c98eSRoss Zwisler while (!done) { 9399973c98eSRoss Zwisler pvec.nr = find_get_entries_tag(mapping, start_index, 9409973c98eSRoss Zwisler PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE, 9419973c98eSRoss Zwisler pvec.pages, indices); 9429973c98eSRoss Zwisler 9439973c98eSRoss Zwisler if (pvec.nr == 0) 9449973c98eSRoss Zwisler break; 9459973c98eSRoss Zwisler 9469973c98eSRoss Zwisler for (i = 0; i < pvec.nr; i++) { 9479973c98eSRoss Zwisler if (indices[i] > end_index) { 9489973c98eSRoss Zwisler done = true; 9499973c98eSRoss Zwisler break; 9509973c98eSRoss Zwisler } 9519973c98eSRoss Zwisler 9523fe0791cSDan Williams ret = dax_writeback_one(dax_dev, mapping, indices[i], 9533fe0791cSDan Williams pvec.pages[i]); 954819ec6b9SJeff Layton if (ret < 0) { 955819ec6b9SJeff Layton mapping_set_error(mapping, ret); 956d14a3f48SRoss Zwisler goto out; 957d14a3f48SRoss Zwisler } 958d14a3f48SRoss Zwisler } 9591eb643d0SJan Kara start_index = indices[pvec.nr - 1] + 1; 960d14a3f48SRoss Zwisler } 961d14a3f48SRoss Zwisler out: 962cccbce67SDan Williams put_dax(dax_dev); 963d14a3f48SRoss Zwisler trace_dax_writeback_range_done(inode, start_index, end_index); 964d14a3f48SRoss Zwisler return (ret < 0 ? 
ret : 0); 9659973c98eSRoss Zwisler } 9669973c98eSRoss Zwisler EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); 9679973c98eSRoss Zwisler 96831a6f1a6SJan Kara static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) 969f7ca90b1SMatthew Wilcox { 970a3841f94SLinus Torvalds return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9; 97131a6f1a6SJan Kara } 972f7ca90b1SMatthew Wilcox 9735e161e40SJan Kara static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size, 9745e161e40SJan Kara pfn_t *pfnp) 9755e161e40SJan Kara { 9765e161e40SJan Kara const sector_t sector = dax_iomap_sector(iomap, pos); 9775e161e40SJan Kara pgoff_t pgoff; 9785e161e40SJan Kara void *kaddr; 9795e161e40SJan Kara int id, rc; 9805e161e40SJan Kara long length; 9815e161e40SJan Kara 9825e161e40SJan Kara rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff); 983cccbce67SDan Williams if (rc) 984cccbce67SDan Williams return rc; 985cccbce67SDan Williams id = dax_read_lock(); 9865e161e40SJan Kara length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), 9875e161e40SJan Kara &kaddr, pfnp); 9885e161e40SJan Kara if (length < 0) { 9895e161e40SJan Kara rc = length; 9905e161e40SJan Kara goto out; 9915e161e40SJan Kara } 9925e161e40SJan Kara rc = -EINVAL; 9935e161e40SJan Kara if (PFN_PHYS(length) < size) 9945e161e40SJan Kara goto out; 9955e161e40SJan Kara if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1)) 9965e161e40SJan Kara goto out; 9975e161e40SJan Kara /* For larger pages we need devmap */ 9985e161e40SJan Kara if (length > 1 && !pfn_t_devmap(*pfnp)) 9995e161e40SJan Kara goto out; 10005e161e40SJan Kara rc = 0; 10015e161e40SJan Kara out: 1002cccbce67SDan Williams dax_read_unlock(id); 1003cccbce67SDan Williams return rc; 1004cccbce67SDan Williams } 1005f7ca90b1SMatthew Wilcox 10062f89dc12SJan Kara /* 100791d25ba8SRoss Zwisler * The user has performed a load from a hole in the file. Allocating a new 100891d25ba8SRoss Zwisler * page in the file would cause excessive storage usage for workloads with 100991d25ba8SRoss Zwisler * sparse files. Instead we insert a read-only mapping of the 4k zero page. 101091d25ba8SRoss Zwisler * If this page is ever written to we will re-fault and change the mapping to 101191d25ba8SRoss Zwisler * point to real DAX storage instead. 
10122f89dc12SJan Kara */ 1013ab77dab4SSouptick Joarder static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry, 1014e30331ffSRoss Zwisler struct vm_fault *vmf) 1015e30331ffSRoss Zwisler { 1016e30331ffSRoss Zwisler struct inode *inode = mapping->host; 101791d25ba8SRoss Zwisler unsigned long vaddr = vmf->address; 1018ab77dab4SSouptick Joarder vm_fault_t ret = VM_FAULT_NOPAGE; 101991d25ba8SRoss Zwisler struct page *zero_page; 10203fe0791cSDan Williams pfn_t pfn; 1021e30331ffSRoss Zwisler 102291d25ba8SRoss Zwisler zero_page = ZERO_PAGE(0); 102391d25ba8SRoss Zwisler if (unlikely(!zero_page)) { 1024e30331ffSRoss Zwisler ret = VM_FAULT_OOM; 1025e30331ffSRoss Zwisler goto out; 1026e30331ffSRoss Zwisler } 1027e30331ffSRoss Zwisler 10283fe0791cSDan Williams pfn = page_to_pfn_t(zero_page); 1029cc4a90acSMatthew Wilcox dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE, 1030cc4a90acSMatthew Wilcox false); 1031ab77dab4SSouptick Joarder ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); 1032e30331ffSRoss Zwisler out: 1033e30331ffSRoss Zwisler trace_dax_load_hole(inode, vmf, ret); 1034e30331ffSRoss Zwisler return ret; 1035e30331ffSRoss Zwisler } 1036e30331ffSRoss Zwisler 10374b0228faSVishal Verma static bool dax_range_is_aligned(struct block_device *bdev, 10384b0228faSVishal Verma unsigned int offset, unsigned int length) 10394b0228faSVishal Verma { 10404b0228faSVishal Verma unsigned short sector_size = bdev_logical_block_size(bdev); 10414b0228faSVishal Verma 10424b0228faSVishal Verma if (!IS_ALIGNED(offset, sector_size)) 10434b0228faSVishal Verma return false; 10444b0228faSVishal Verma if (!IS_ALIGNED(length, sector_size)) 10454b0228faSVishal Verma return false; 10464b0228faSVishal Verma 10474b0228faSVishal Verma return true; 10484b0228faSVishal Verma } 10494b0228faSVishal Verma 1050cccbce67SDan Williams int __dax_zero_page_range(struct block_device *bdev, 1051cccbce67SDan Williams struct dax_device *dax_dev, sector_t sector, 1052cccbce67SDan Williams unsigned int offset, unsigned int size) 1053679c8bd3SChristoph Hellwig { 1054cccbce67SDan Williams if (dax_range_is_aligned(bdev, offset, size)) { 1055cccbce67SDan Williams sector_t start_sector = sector + (offset >> 9); 10564b0228faSVishal Verma 10574b0228faSVishal Verma return blkdev_issue_zeroout(bdev, start_sector, 105853ef7d0eSLinus Torvalds size >> 9, GFP_NOFS, 0); 10594b0228faSVishal Verma } else { 1060cccbce67SDan Williams pgoff_t pgoff; 1061cccbce67SDan Williams long rc, id; 1062cccbce67SDan Williams void *kaddr; 1063cccbce67SDan Williams pfn_t pfn; 1064cccbce67SDan Williams 1065e84b83b9SDan Williams rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff); 1066cccbce67SDan Williams if (rc) 1067cccbce67SDan Williams return rc; 1068cccbce67SDan Williams 1069cccbce67SDan Williams id = dax_read_lock(); 1070e84b83b9SDan Williams rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, 1071cccbce67SDan Williams &pfn); 1072cccbce67SDan Williams if (rc < 0) { 1073cccbce67SDan Williams dax_read_unlock(id); 1074cccbce67SDan Williams return rc; 1075cccbce67SDan Williams } 107681f55870SDan Williams memset(kaddr + offset, 0, size); 1077c3ca015fSMikulas Patocka dax_flush(dax_dev, kaddr + offset, size); 1078cccbce67SDan Williams dax_read_unlock(id); 10794b0228faSVishal Verma } 1080679c8bd3SChristoph Hellwig return 0; 1081679c8bd3SChristoph Hellwig } 1082679c8bd3SChristoph Hellwig EXPORT_SYMBOL_GPL(__dax_zero_page_range); 1083679c8bd3SChristoph Hellwig 1084a254e568SChristoph Hellwig static loff_t 108511c59c92SRoss Zwisler 
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, 1086a254e568SChristoph Hellwig struct iomap *iomap) 1087a254e568SChristoph Hellwig { 1088cccbce67SDan Williams struct block_device *bdev = iomap->bdev; 1089cccbce67SDan Williams struct dax_device *dax_dev = iomap->dax_dev; 1090a254e568SChristoph Hellwig struct iov_iter *iter = data; 1091a254e568SChristoph Hellwig loff_t end = pos + length, done = 0; 1092a254e568SChristoph Hellwig ssize_t ret = 0; 1093a77d4786SDan Williams size_t xfer; 1094cccbce67SDan Williams int id; 1095a254e568SChristoph Hellwig 1096a254e568SChristoph Hellwig if (iov_iter_rw(iter) == READ) { 1097a254e568SChristoph Hellwig end = min(end, i_size_read(inode)); 1098a254e568SChristoph Hellwig if (pos >= end) 1099a254e568SChristoph Hellwig return 0; 1100a254e568SChristoph Hellwig 1101a254e568SChristoph Hellwig if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) 1102a254e568SChristoph Hellwig return iov_iter_zero(min(length, end - pos), iter); 1103a254e568SChristoph Hellwig } 1104a254e568SChristoph Hellwig 1105a254e568SChristoph Hellwig if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) 1106a254e568SChristoph Hellwig return -EIO; 1107a254e568SChristoph Hellwig 1108e3fce68cSJan Kara /* 1109e3fce68cSJan Kara * Write can allocate block for an area which has a hole page mapped 1110e3fce68cSJan Kara * into page tables. We have to tear down these mappings so that data 1111e3fce68cSJan Kara * written by write(2) is visible in mmap. 1112e3fce68cSJan Kara */ 1113cd656375SJan Kara if (iomap->flags & IOMAP_F_NEW) { 1114e3fce68cSJan Kara invalidate_inode_pages2_range(inode->i_mapping, 1115e3fce68cSJan Kara pos >> PAGE_SHIFT, 1116e3fce68cSJan Kara (end - 1) >> PAGE_SHIFT); 1117e3fce68cSJan Kara } 1118e3fce68cSJan Kara 1119cccbce67SDan Williams id = dax_read_lock(); 1120a254e568SChristoph Hellwig while (pos < end) { 1121a254e568SChristoph Hellwig unsigned offset = pos & (PAGE_SIZE - 1); 1122cccbce67SDan Williams const size_t size = ALIGN(length + offset, PAGE_SIZE); 1123cccbce67SDan Williams const sector_t sector = dax_iomap_sector(iomap, pos); 1124a254e568SChristoph Hellwig ssize_t map_len; 1125cccbce67SDan Williams pgoff_t pgoff; 1126cccbce67SDan Williams void *kaddr; 1127cccbce67SDan Williams pfn_t pfn; 1128a254e568SChristoph Hellwig 1129d1908f52SMichal Hocko if (fatal_signal_pending(current)) { 1130d1908f52SMichal Hocko ret = -EINTR; 1131d1908f52SMichal Hocko break; 1132d1908f52SMichal Hocko } 1133d1908f52SMichal Hocko 1134cccbce67SDan Williams ret = bdev_dax_pgoff(bdev, sector, size, &pgoff); 1135cccbce67SDan Williams if (ret) 1136cccbce67SDan Williams break; 1137cccbce67SDan Williams 1138cccbce67SDan Williams map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), 1139cccbce67SDan Williams &kaddr, &pfn); 1140a254e568SChristoph Hellwig if (map_len < 0) { 1141a254e568SChristoph Hellwig ret = map_len; 1142a254e568SChristoph Hellwig break; 1143a254e568SChristoph Hellwig } 1144a254e568SChristoph Hellwig 1145cccbce67SDan Williams map_len = PFN_PHYS(map_len); 1146cccbce67SDan Williams kaddr += offset; 1147a254e568SChristoph Hellwig map_len -= offset; 1148a254e568SChristoph Hellwig if (map_len > end - pos) 1149a254e568SChristoph Hellwig map_len = end - pos; 1150a254e568SChristoph Hellwig 1151a2e050f5SRoss Zwisler /* 1152a2e050f5SRoss Zwisler * The userspace address for the memory copy has already been 1153a2e050f5SRoss Zwisler * validated via access_ok() in either vfs_read() or 1154a2e050f5SRoss Zwisler * vfs_write(), depending on which operation we are 
doing. 1155a2e050f5SRoss Zwisler */ 1156a254e568SChristoph Hellwig if (iov_iter_rw(iter) == WRITE) 1157a77d4786SDan Williams xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, 1158fec53774SDan Williams map_len, iter); 1159a254e568SChristoph Hellwig else 1160a77d4786SDan Williams xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr, 1161b3a9a0c3SDan Williams map_len, iter); 1162a254e568SChristoph Hellwig 1163a77d4786SDan Williams pos += xfer; 1164a77d4786SDan Williams length -= xfer; 1165a77d4786SDan Williams done += xfer; 1166a77d4786SDan Williams 1167a77d4786SDan Williams if (xfer == 0) 1168a77d4786SDan Williams ret = -EFAULT; 1169a77d4786SDan Williams if (xfer < map_len) 1170a77d4786SDan Williams break; 1171a254e568SChristoph Hellwig } 1172cccbce67SDan Williams dax_read_unlock(id); 1173a254e568SChristoph Hellwig 1174a254e568SChristoph Hellwig return done ? done : ret; 1175a254e568SChristoph Hellwig } 1176a254e568SChristoph Hellwig 1177a254e568SChristoph Hellwig /** 117811c59c92SRoss Zwisler * dax_iomap_rw - Perform I/O to a DAX file 1179a254e568SChristoph Hellwig * @iocb: The control block for this I/O 1180a254e568SChristoph Hellwig * @iter: The addresses to do I/O from or to 1181a254e568SChristoph Hellwig * @ops: iomap ops passed from the file system 1182a254e568SChristoph Hellwig * 1183a254e568SChristoph Hellwig * This function performs read and write operations to directly mapped 1184a254e568SChristoph Hellwig * persistent memory. The callers needs to take care of read/write exclusion 1185a254e568SChristoph Hellwig * and evicting any page cache pages in the region under I/O. 1186a254e568SChristoph Hellwig */ 1187a254e568SChristoph Hellwig ssize_t 118811c59c92SRoss Zwisler dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, 11898ff6daa1SChristoph Hellwig const struct iomap_ops *ops) 1190a254e568SChristoph Hellwig { 1191a254e568SChristoph Hellwig struct address_space *mapping = iocb->ki_filp->f_mapping; 1192a254e568SChristoph Hellwig struct inode *inode = mapping->host; 1193a254e568SChristoph Hellwig loff_t pos = iocb->ki_pos, ret = 0, done = 0; 1194a254e568SChristoph Hellwig unsigned flags = 0; 1195a254e568SChristoph Hellwig 1196168316dbSChristoph Hellwig if (iov_iter_rw(iter) == WRITE) { 1197168316dbSChristoph Hellwig lockdep_assert_held_exclusive(&inode->i_rwsem); 1198a254e568SChristoph Hellwig flags |= IOMAP_WRITE; 1199168316dbSChristoph Hellwig } else { 1200168316dbSChristoph Hellwig lockdep_assert_held(&inode->i_rwsem); 1201168316dbSChristoph Hellwig } 1202a254e568SChristoph Hellwig 1203a254e568SChristoph Hellwig while (iov_iter_count(iter)) { 1204a254e568SChristoph Hellwig ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, 120511c59c92SRoss Zwisler iter, dax_iomap_actor); 1206a254e568SChristoph Hellwig if (ret <= 0) 1207a254e568SChristoph Hellwig break; 1208a254e568SChristoph Hellwig pos += ret; 1209a254e568SChristoph Hellwig done += ret; 1210a254e568SChristoph Hellwig } 1211a254e568SChristoph Hellwig 1212a254e568SChristoph Hellwig iocb->ki_pos += done; 1213a254e568SChristoph Hellwig return done ? 
done : ret; 1214a254e568SChristoph Hellwig } 121511c59c92SRoss Zwisler EXPORT_SYMBOL_GPL(dax_iomap_rw); 1216a7d73fe6SChristoph Hellwig 1217ab77dab4SSouptick Joarder static vm_fault_t dax_fault_return(int error) 12189f141d6eSJan Kara { 12199f141d6eSJan Kara if (error == 0) 12209f141d6eSJan Kara return VM_FAULT_NOPAGE; 12219f141d6eSJan Kara if (error == -ENOMEM) 12229f141d6eSJan Kara return VM_FAULT_OOM; 12239f141d6eSJan Kara return VM_FAULT_SIGBUS; 12249f141d6eSJan Kara } 12259f141d6eSJan Kara 1226aaa422c4SDan Williams /* 1227aaa422c4SDan Williams * MAP_SYNC on a dax mapping guarantees dirty metadata is 1228aaa422c4SDan Williams * flushed on write-faults (non-cow), but not read-faults. 1229aaa422c4SDan Williams */ 1230aaa422c4SDan Williams static bool dax_fault_is_synchronous(unsigned long flags, 1231aaa422c4SDan Williams struct vm_area_struct *vma, struct iomap *iomap) 1232aaa422c4SDan Williams { 1233aaa422c4SDan Williams return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) 1234aaa422c4SDan Williams && (iomap->flags & IOMAP_F_DIRTY); 1235aaa422c4SDan Williams } 1236aaa422c4SDan Williams 1237ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, 1238c0b24625SJan Kara int *iomap_errp, const struct iomap_ops *ops) 1239a7d73fe6SChristoph Hellwig { 1240a0987ad5SJan Kara struct vm_area_struct *vma = vmf->vma; 1241a0987ad5SJan Kara struct address_space *mapping = vma->vm_file->f_mapping; 1242a7d73fe6SChristoph Hellwig struct inode *inode = mapping->host; 12431a29d85eSJan Kara unsigned long vaddr = vmf->address; 1244a7d73fe6SChristoph Hellwig loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; 1245a7d73fe6SChristoph Hellwig struct iomap iomap = { 0 }; 12469484ab1bSJan Kara unsigned flags = IOMAP_FAULT; 1247a7d73fe6SChristoph Hellwig int error, major = 0; 1248d2c43ef1SJan Kara bool write = vmf->flags & FAULT_FLAG_WRITE; 1249caa51d26SJan Kara bool sync; 1250ab77dab4SSouptick Joarder vm_fault_t ret = 0; 1251a7d73fe6SChristoph Hellwig void *entry; 12521b5a1cb2SJan Kara pfn_t pfn; 1253a7d73fe6SChristoph Hellwig 1254ab77dab4SSouptick Joarder trace_dax_pte_fault(inode, vmf, ret); 1255a7d73fe6SChristoph Hellwig /* 1256a7d73fe6SChristoph Hellwig * Check whether offset isn't beyond end of file now. Caller is supposed 1257a7d73fe6SChristoph Hellwig * to hold locks serializing us with truncate / punch hole so this is 1258a7d73fe6SChristoph Hellwig * a reliable test. 1259a7d73fe6SChristoph Hellwig */ 1260a9c42b33SRoss Zwisler if (pos >= i_size_read(inode)) { 1261ab77dab4SSouptick Joarder ret = VM_FAULT_SIGBUS; 1262a9c42b33SRoss Zwisler goto out; 1263a9c42b33SRoss Zwisler } 1264a7d73fe6SChristoph Hellwig 1265d2c43ef1SJan Kara if (write && !vmf->cow_page) 1266a7d73fe6SChristoph Hellwig flags |= IOMAP_WRITE; 1267a7d73fe6SChristoph Hellwig 126813e451fdSJan Kara entry = grab_mapping_entry(mapping, vmf->pgoff, 0); 126913e451fdSJan Kara if (IS_ERR(entry)) { 1270ab77dab4SSouptick Joarder ret = dax_fault_return(PTR_ERR(entry)); 127113e451fdSJan Kara goto out; 127213e451fdSJan Kara } 127313e451fdSJan Kara 1274a7d73fe6SChristoph Hellwig /* 1275e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1276e2093926SRoss Zwisler * mappings, that we have raced with a PMD fault that overlaps with 1277e2093926SRoss Zwisler * the PTE we need to set up. If so just return and the fault will be 1278e2093926SRoss Zwisler * retried. 
1279e2093926SRoss Zwisler */ 1280e2093926SRoss Zwisler if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { 1281ab77dab4SSouptick Joarder ret = VM_FAULT_NOPAGE; 1282e2093926SRoss Zwisler goto unlock_entry; 1283e2093926SRoss Zwisler } 1284e2093926SRoss Zwisler 1285e2093926SRoss Zwisler /* 1286a7d73fe6SChristoph Hellwig * Note that we don't bother to use iomap_apply here: DAX required 1287a7d73fe6SChristoph Hellwig * the file system block size to be equal the page size, which means 1288a7d73fe6SChristoph Hellwig * that we never have to deal with more than a single extent here. 1289a7d73fe6SChristoph Hellwig */ 1290a7d73fe6SChristoph Hellwig error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap); 1291c0b24625SJan Kara if (iomap_errp) 1292c0b24625SJan Kara *iomap_errp = error; 1293a9c42b33SRoss Zwisler if (error) { 1294ab77dab4SSouptick Joarder ret = dax_fault_return(error); 129513e451fdSJan Kara goto unlock_entry; 1296a9c42b33SRoss Zwisler } 1297a7d73fe6SChristoph Hellwig if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { 129813e451fdSJan Kara error = -EIO; /* fs corruption? */ 129913e451fdSJan Kara goto error_finish_iomap; 1300a7d73fe6SChristoph Hellwig } 1301a7d73fe6SChristoph Hellwig 1302a7d73fe6SChristoph Hellwig if (vmf->cow_page) { 130331a6f1a6SJan Kara sector_t sector = dax_iomap_sector(&iomap, pos); 130431a6f1a6SJan Kara 1305a7d73fe6SChristoph Hellwig switch (iomap.type) { 1306a7d73fe6SChristoph Hellwig case IOMAP_HOLE: 1307a7d73fe6SChristoph Hellwig case IOMAP_UNWRITTEN: 1308a7d73fe6SChristoph Hellwig clear_user_highpage(vmf->cow_page, vaddr); 1309a7d73fe6SChristoph Hellwig break; 1310a7d73fe6SChristoph Hellwig case IOMAP_MAPPED: 1311cccbce67SDan Williams error = copy_user_dax(iomap.bdev, iomap.dax_dev, 1312cccbce67SDan Williams sector, PAGE_SIZE, vmf->cow_page, vaddr); 1313a7d73fe6SChristoph Hellwig break; 1314a7d73fe6SChristoph Hellwig default: 1315a7d73fe6SChristoph Hellwig WARN_ON_ONCE(1); 1316a7d73fe6SChristoph Hellwig error = -EIO; 1317a7d73fe6SChristoph Hellwig break; 1318a7d73fe6SChristoph Hellwig } 1319a7d73fe6SChristoph Hellwig 1320a7d73fe6SChristoph Hellwig if (error) 132113e451fdSJan Kara goto error_finish_iomap; 1322b1aa812bSJan Kara 1323b1aa812bSJan Kara __SetPageUptodate(vmf->cow_page); 1324ab77dab4SSouptick Joarder ret = finish_fault(vmf); 1325ab77dab4SSouptick Joarder if (!ret) 1326ab77dab4SSouptick Joarder ret = VM_FAULT_DONE_COW; 132713e451fdSJan Kara goto finish_iomap; 1328a7d73fe6SChristoph Hellwig } 1329a7d73fe6SChristoph Hellwig 1330aaa422c4SDan Williams sync = dax_fault_is_synchronous(flags, vma, &iomap); 1331caa51d26SJan Kara 1332a7d73fe6SChristoph Hellwig switch (iomap.type) { 1333a7d73fe6SChristoph Hellwig case IOMAP_MAPPED: 1334a7d73fe6SChristoph Hellwig if (iomap.flags & IOMAP_F_NEW) { 1335a7d73fe6SChristoph Hellwig count_vm_event(PGMAJFAULT); 1336a0987ad5SJan Kara count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); 1337a7d73fe6SChristoph Hellwig major = VM_FAULT_MAJOR; 1338a7d73fe6SChristoph Hellwig } 13391b5a1cb2SJan Kara error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn); 13401b5a1cb2SJan Kara if (error < 0) 13411b5a1cb2SJan Kara goto error_finish_iomap; 13421b5a1cb2SJan Kara 13433fe0791cSDan Williams entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn, 1344caa51d26SJan Kara 0, write && !sync); 13451b5a1cb2SJan Kara 1346caa51d26SJan Kara /* 1347caa51d26SJan Kara * If we are doing synchronous page fault and inode needs fsync, 1348caa51d26SJan Kara * we can insert PTE into page tables only after that happens. 
1349caa51d26SJan Kara * Skip insertion for now and return the pfn so that caller can 1350caa51d26SJan Kara * insert it after fsync is done. 1351caa51d26SJan Kara */ 1352caa51d26SJan Kara if (sync) { 1353caa51d26SJan Kara if (WARN_ON_ONCE(!pfnp)) { 1354caa51d26SJan Kara error = -EIO; 1355caa51d26SJan Kara goto error_finish_iomap; 1356caa51d26SJan Kara } 1357caa51d26SJan Kara *pfnp = pfn; 1358ab77dab4SSouptick Joarder ret = VM_FAULT_NEEDDSYNC | major; 1359caa51d26SJan Kara goto finish_iomap; 1360caa51d26SJan Kara } 13611b5a1cb2SJan Kara trace_dax_insert_mapping(inode, vmf, entry); 13621b5a1cb2SJan Kara if (write) 1363ab77dab4SSouptick Joarder ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn); 13641b5a1cb2SJan Kara else 1365ab77dab4SSouptick Joarder ret = vmf_insert_mixed(vma, vaddr, pfn); 13661b5a1cb2SJan Kara 1367ab77dab4SSouptick Joarder goto finish_iomap; 1368a7d73fe6SChristoph Hellwig case IOMAP_UNWRITTEN: 1369a7d73fe6SChristoph Hellwig case IOMAP_HOLE: 1370d2c43ef1SJan Kara if (!write) { 1371ab77dab4SSouptick Joarder ret = dax_load_hole(mapping, entry, vmf); 137213e451fdSJan Kara goto finish_iomap; 13731550290bSRoss Zwisler } 1374a7d73fe6SChristoph Hellwig /*FALLTHRU*/ 1375a7d73fe6SChristoph Hellwig default: 1376a7d73fe6SChristoph Hellwig WARN_ON_ONCE(1); 1377a7d73fe6SChristoph Hellwig error = -EIO; 1378a7d73fe6SChristoph Hellwig break; 1379a7d73fe6SChristoph Hellwig } 1380a7d73fe6SChristoph Hellwig 138113e451fdSJan Kara error_finish_iomap: 1382ab77dab4SSouptick Joarder ret = dax_fault_return(error); 13839f141d6eSJan Kara finish_iomap: 13849f141d6eSJan Kara if (ops->iomap_end) { 13859f141d6eSJan Kara int copied = PAGE_SIZE; 13869f141d6eSJan Kara 1387ab77dab4SSouptick Joarder if (ret & VM_FAULT_ERROR) 13889f141d6eSJan Kara copied = 0; 13899f141d6eSJan Kara /* 13909f141d6eSJan Kara * The fault is done by now and there's no way back (other 13919f141d6eSJan Kara * thread may be already happily using PTE we have installed). 13929f141d6eSJan Kara * Just ignore error from ->iomap_end since we cannot do much 13939f141d6eSJan Kara * with it. 
13949f141d6eSJan Kara */ 13959f141d6eSJan Kara ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap); 13961550290bSRoss Zwisler } 139713e451fdSJan Kara unlock_entry: 139891d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, vmf->pgoff); 1399a9c42b33SRoss Zwisler out: 1400ab77dab4SSouptick Joarder trace_dax_pte_fault_done(inode, vmf, ret); 1401ab77dab4SSouptick Joarder return ret | major; 1402a7d73fe6SChristoph Hellwig } 1403642261acSRoss Zwisler 1404642261acSRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 1405ab77dab4SSouptick Joarder static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap, 140691d25ba8SRoss Zwisler void *entry) 1407642261acSRoss Zwisler { 1408f4200391SDave Jiang struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1409f4200391SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1410653b2ea3SRoss Zwisler struct inode *inode = mapping->host; 1411642261acSRoss Zwisler struct page *zero_page; 1412653b2ea3SRoss Zwisler void *ret = NULL; 1413642261acSRoss Zwisler spinlock_t *ptl; 1414642261acSRoss Zwisler pmd_t pmd_entry; 14153fe0791cSDan Williams pfn_t pfn; 1416642261acSRoss Zwisler 1417f4200391SDave Jiang zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); 1418642261acSRoss Zwisler 1419642261acSRoss Zwisler if (unlikely(!zero_page)) 1420653b2ea3SRoss Zwisler goto fallback; 1421642261acSRoss Zwisler 14223fe0791cSDan Williams pfn = page_to_pfn_t(zero_page); 14233fe0791cSDan Williams ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn, 1424f5b7b748SJan Kara RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false); 1425642261acSRoss Zwisler 1426f4200391SDave Jiang ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1427f4200391SDave Jiang if (!pmd_none(*(vmf->pmd))) { 1428642261acSRoss Zwisler spin_unlock(ptl); 1429653b2ea3SRoss Zwisler goto fallback; 1430642261acSRoss Zwisler } 1431642261acSRoss Zwisler 1432f4200391SDave Jiang pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); 1433642261acSRoss Zwisler pmd_entry = pmd_mkhuge(pmd_entry); 1434f4200391SDave Jiang set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); 1435642261acSRoss Zwisler spin_unlock(ptl); 1436f4200391SDave Jiang trace_dax_pmd_load_hole(inode, vmf, zero_page, ret); 1437642261acSRoss Zwisler return VM_FAULT_NOPAGE; 1438653b2ea3SRoss Zwisler 1439653b2ea3SRoss Zwisler fallback: 1440f4200391SDave Jiang trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret); 1441642261acSRoss Zwisler return VM_FAULT_FALLBACK; 1442642261acSRoss Zwisler } 1443642261acSRoss Zwisler 1444ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 1445a2d58167SDave Jiang const struct iomap_ops *ops) 1446642261acSRoss Zwisler { 1447f4200391SDave Jiang struct vm_area_struct *vma = vmf->vma; 1448642261acSRoss Zwisler struct address_space *mapping = vma->vm_file->f_mapping; 1449d8a849e1SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1450d8a849e1SDave Jiang bool write = vmf->flags & FAULT_FLAG_WRITE; 1451caa51d26SJan Kara bool sync; 14529484ab1bSJan Kara unsigned int iomap_flags = (write ? 
IOMAP_WRITE : 0) | IOMAP_FAULT; 1453642261acSRoss Zwisler struct inode *inode = mapping->host; 1454ab77dab4SSouptick Joarder vm_fault_t result = VM_FAULT_FALLBACK; 1455642261acSRoss Zwisler struct iomap iomap = { 0 }; 1456642261acSRoss Zwisler pgoff_t max_pgoff, pgoff; 1457642261acSRoss Zwisler void *entry; 1458642261acSRoss Zwisler loff_t pos; 1459642261acSRoss Zwisler int error; 1460302a5e31SJan Kara pfn_t pfn; 1461642261acSRoss Zwisler 1462282a8e03SRoss Zwisler /* 1463282a8e03SRoss Zwisler * Check whether offset isn't beyond end of file now. Caller is 1464282a8e03SRoss Zwisler * supposed to hold locks serializing us with truncate / punch hole so 1465282a8e03SRoss Zwisler * this is a reliable test. 1466282a8e03SRoss Zwisler */ 1467282a8e03SRoss Zwisler pgoff = linear_page_index(vma, pmd_addr); 1468957ac8c4SJeff Moyer max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 1469282a8e03SRoss Zwisler 1470f4200391SDave Jiang trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); 1471282a8e03SRoss Zwisler 1472fffa281bSRoss Zwisler /* 1473fffa281bSRoss Zwisler * Make sure that the faulting address's PMD offset (color) matches 1474fffa281bSRoss Zwisler * the PMD offset from the start of the file. This is necessary so 1475fffa281bSRoss Zwisler * that a PMD range in the page table overlaps exactly with a PMD 1476fffa281bSRoss Zwisler * range in the radix tree. 1477fffa281bSRoss Zwisler */ 1478fffa281bSRoss Zwisler if ((vmf->pgoff & PG_PMD_COLOUR) != 1479fffa281bSRoss Zwisler ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) 1480fffa281bSRoss Zwisler goto fallback; 1481fffa281bSRoss Zwisler 1482642261acSRoss Zwisler /* Fall back to PTEs if we're going to COW */ 1483642261acSRoss Zwisler if (write && !(vma->vm_flags & VM_SHARED)) 1484642261acSRoss Zwisler goto fallback; 1485642261acSRoss Zwisler 1486642261acSRoss Zwisler /* If the PMD would extend outside the VMA */ 1487642261acSRoss Zwisler if (pmd_addr < vma->vm_start) 1488642261acSRoss Zwisler goto fallback; 1489642261acSRoss Zwisler if ((pmd_addr + PMD_SIZE) > vma->vm_end) 1490642261acSRoss Zwisler goto fallback; 1491642261acSRoss Zwisler 1492957ac8c4SJeff Moyer if (pgoff >= max_pgoff) { 1493282a8e03SRoss Zwisler result = VM_FAULT_SIGBUS; 1494282a8e03SRoss Zwisler goto out; 1495282a8e03SRoss Zwisler } 1496642261acSRoss Zwisler 1497642261acSRoss Zwisler /* If the PMD would extend beyond the file size */ 1498957ac8c4SJeff Moyer if ((pgoff | PG_PMD_COLOUR) >= max_pgoff) 1499642261acSRoss Zwisler goto fallback; 1500642261acSRoss Zwisler 1501642261acSRoss Zwisler /* 150291d25ba8SRoss Zwisler * grab_mapping_entry() will make sure we get a 2MiB empty entry, a 150391d25ba8SRoss Zwisler * 2MiB zero page entry or a DAX PMD. If it can't (because a 4k page 150491d25ba8SRoss Zwisler * is already in the tree, for instance), it will return -EEXIST and 150591d25ba8SRoss Zwisler * we just fall back to 4k entries. 15069f141d6eSJan Kara */ 15079f141d6eSJan Kara entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); 15089f141d6eSJan Kara if (IS_ERR(entry)) 1509876f2946SRoss Zwisler goto fallback; 1510876f2946SRoss Zwisler 1511876f2946SRoss Zwisler /* 1512e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1513e2093926SRoss Zwisler * mappings, that we have raced with a PTE fault that overlaps with 1514e2093926SRoss Zwisler * the PMD we need to set up. If so just return and the fault will be 1515e2093926SRoss Zwisler * retried. 
1516e2093926SRoss Zwisler */ 1517e2093926SRoss Zwisler if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && 1518e2093926SRoss Zwisler !pmd_devmap(*vmf->pmd)) { 1519e2093926SRoss Zwisler result = 0; 1520e2093926SRoss Zwisler goto unlock_entry; 1521e2093926SRoss Zwisler } 1522e2093926SRoss Zwisler 1523e2093926SRoss Zwisler /* 1524876f2946SRoss Zwisler * Note that we don't use iomap_apply here. We aren't doing I/O, only 1525876f2946SRoss Zwisler * setting up a mapping, so really we're using iomap_begin() as a way 1526876f2946SRoss Zwisler * to look up our filesystem block. 1527876f2946SRoss Zwisler */ 1528876f2946SRoss Zwisler pos = (loff_t)pgoff << PAGE_SHIFT; 1529876f2946SRoss Zwisler error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap); 1530876f2946SRoss Zwisler if (error) 1531876f2946SRoss Zwisler goto unlock_entry; 1532876f2946SRoss Zwisler 1533876f2946SRoss Zwisler if (iomap.offset + iomap.length < pos + PMD_SIZE) 15349f141d6eSJan Kara goto finish_iomap; 15359f141d6eSJan Kara 1536aaa422c4SDan Williams sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap); 1537caa51d26SJan Kara 1538642261acSRoss Zwisler switch (iomap.type) { 1539642261acSRoss Zwisler case IOMAP_MAPPED: 1540302a5e31SJan Kara error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn); 1541302a5e31SJan Kara if (error < 0) 1542302a5e31SJan Kara goto finish_iomap; 1543302a5e31SJan Kara 15443fe0791cSDan Williams entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn, 1545caa51d26SJan Kara RADIX_DAX_PMD, write && !sync); 1546302a5e31SJan Kara 1547caa51d26SJan Kara /* 1548caa51d26SJan Kara * If we are doing synchronous page fault and inode needs fsync, 1549caa51d26SJan Kara * we can insert PMD into page tables only after that happens. 1550caa51d26SJan Kara * Skip insertion for now and return the pfn so that caller can 1551caa51d26SJan Kara * insert it after fsync is done. 1552caa51d26SJan Kara */ 1553caa51d26SJan Kara if (sync) { 1554caa51d26SJan Kara if (WARN_ON_ONCE(!pfnp)) 1555caa51d26SJan Kara goto finish_iomap; 1556caa51d26SJan Kara *pfnp = pfn; 1557caa51d26SJan Kara result = VM_FAULT_NEEDDSYNC; 1558caa51d26SJan Kara goto finish_iomap; 1559caa51d26SJan Kara } 1560caa51d26SJan Kara 1561302a5e31SJan Kara trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); 1562302a5e31SJan Kara result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn, 1563302a5e31SJan Kara write); 1564642261acSRoss Zwisler break; 1565642261acSRoss Zwisler case IOMAP_UNWRITTEN: 1566642261acSRoss Zwisler case IOMAP_HOLE: 1567642261acSRoss Zwisler if (WARN_ON_ONCE(write)) 1568876f2946SRoss Zwisler break; 156991d25ba8SRoss Zwisler result = dax_pmd_load_hole(vmf, &iomap, entry); 1570642261acSRoss Zwisler break; 1571642261acSRoss Zwisler default: 1572642261acSRoss Zwisler WARN_ON_ONCE(1); 1573642261acSRoss Zwisler break; 1574642261acSRoss Zwisler } 1575642261acSRoss Zwisler 15769f141d6eSJan Kara finish_iomap: 15779f141d6eSJan Kara if (ops->iomap_end) { 15789f141d6eSJan Kara int copied = PMD_SIZE; 15799f141d6eSJan Kara 15809f141d6eSJan Kara if (result == VM_FAULT_FALLBACK) 15819f141d6eSJan Kara copied = 0; 15829f141d6eSJan Kara /* 15839f141d6eSJan Kara * The fault is done by now and there's no way back (other 15849f141d6eSJan Kara * thread may be already happily using PMD we have installed). 15859f141d6eSJan Kara * Just ignore error from ->iomap_end since we cannot do much 15869f141d6eSJan Kara * with it. 
15879f141d6eSJan Kara */ 15889f141d6eSJan Kara ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags, 15899f141d6eSJan Kara &iomap); 15909f141d6eSJan Kara } 1591876f2946SRoss Zwisler unlock_entry: 159291d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, pgoff); 1593642261acSRoss Zwisler fallback: 1594642261acSRoss Zwisler if (result == VM_FAULT_FALLBACK) { 1595d8a849e1SDave Jiang split_huge_pmd(vma, vmf->pmd, vmf->address); 1596642261acSRoss Zwisler count_vm_event(THP_FAULT_FALLBACK); 1597642261acSRoss Zwisler } 1598282a8e03SRoss Zwisler out: 1599f4200391SDave Jiang trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result); 1600642261acSRoss Zwisler return result; 1601642261acSRoss Zwisler } 1602a2d58167SDave Jiang #else 1603ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 160401cddfe9SArnd Bergmann const struct iomap_ops *ops) 1605a2d58167SDave Jiang { 1606a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1607a2d58167SDave Jiang } 1608642261acSRoss Zwisler #endif /* CONFIG_FS_DAX_PMD */ 1609a2d58167SDave Jiang 1610a2d58167SDave Jiang /** 1611a2d58167SDave Jiang * dax_iomap_fault - handle a page fault on a DAX file 1612a2d58167SDave Jiang * @vmf: The description of the fault 1613cec04e8cSJan Kara * @pe_size: Size of the page to fault in 16149a0dd422SJan Kara * @pfnp: PFN to insert for synchronous faults if fsync is required 1615c0b24625SJan Kara * @iomap_errp: Storage for detailed error code in case of error 1616cec04e8cSJan Kara * @ops: Iomap ops passed from the file system 1617a2d58167SDave Jiang * 1618a2d58167SDave Jiang * When a page fault occurs, filesystems may call this helper in 1619a2d58167SDave Jiang * their fault handler for DAX files. dax_iomap_fault() assumes the caller 1620a2d58167SDave Jiang * has done all the necessary locking for page fault to proceed 1621a2d58167SDave Jiang * successfully. 1622a2d58167SDave Jiang */ 1623ab77dab4SSouptick Joarder vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 1624c0b24625SJan Kara pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) 1625a2d58167SDave Jiang { 1626c791ace1SDave Jiang switch (pe_size) { 1627c791ace1SDave Jiang case PE_SIZE_PTE: 1628c0b24625SJan Kara return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); 1629c791ace1SDave Jiang case PE_SIZE_PMD: 16309a0dd422SJan Kara return dax_iomap_pmd_fault(vmf, pfnp, ops); 1631a2d58167SDave Jiang default: 1632a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1633a2d58167SDave Jiang } 1634a2d58167SDave Jiang } 1635a2d58167SDave Jiang EXPORT_SYMBOL_GPL(dax_iomap_fault); 163671eab6dfSJan Kara 163771eab6dfSJan Kara /** 163871eab6dfSJan Kara * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables 163971eab6dfSJan Kara * @vmf: The description of the fault 164071eab6dfSJan Kara * @pe_size: Size of entry to be inserted 164171eab6dfSJan Kara * @pfn: PFN to insert 164271eab6dfSJan Kara * 164371eab6dfSJan Kara * This function inserts writeable PTE or PMD entry into page tables for mmaped 164471eab6dfSJan Kara * DAX file. It takes care of marking corresponding radix tree entry as dirty 164571eab6dfSJan Kara * as well. 
164671eab6dfSJan Kara */ 1647ab77dab4SSouptick Joarder static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf, 164871eab6dfSJan Kara enum page_entry_size pe_size, 164971eab6dfSJan Kara pfn_t pfn) 165071eab6dfSJan Kara { 165171eab6dfSJan Kara struct address_space *mapping = vmf->vma->vm_file->f_mapping; 165271eab6dfSJan Kara void *entry, **slot; 165371eab6dfSJan Kara pgoff_t index = vmf->pgoff; 1654ab77dab4SSouptick Joarder vm_fault_t ret; 165571eab6dfSJan Kara 1656b93b0163SMatthew Wilcox xa_lock_irq(&mapping->i_pages); 165771eab6dfSJan Kara entry = get_unlocked_mapping_entry(mapping, index, &slot); 165871eab6dfSJan Kara /* Did we race with someone splitting entry or so? */ 165971eab6dfSJan Kara if (!entry || 166071eab6dfSJan Kara (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) || 166171eab6dfSJan Kara (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) { 166271eab6dfSJan Kara put_unlocked_mapping_entry(mapping, index, entry); 1663b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 166471eab6dfSJan Kara trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, 166571eab6dfSJan Kara VM_FAULT_NOPAGE); 166671eab6dfSJan Kara return VM_FAULT_NOPAGE; 166771eab6dfSJan Kara } 1668b93b0163SMatthew Wilcox radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY); 166971eab6dfSJan Kara entry = lock_slot(mapping, slot); 1670b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 167171eab6dfSJan Kara switch (pe_size) { 167271eab6dfSJan Kara case PE_SIZE_PTE: 1673ab77dab4SSouptick Joarder ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 167471eab6dfSJan Kara break; 167571eab6dfSJan Kara #ifdef CONFIG_FS_DAX_PMD 167671eab6dfSJan Kara case PE_SIZE_PMD: 1677ab77dab4SSouptick Joarder ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, 167871eab6dfSJan Kara pfn, true); 167971eab6dfSJan Kara break; 168071eab6dfSJan Kara #endif 168171eab6dfSJan Kara default: 1682ab77dab4SSouptick Joarder ret = VM_FAULT_FALLBACK; 168371eab6dfSJan Kara } 168471eab6dfSJan Kara put_locked_mapping_entry(mapping, index); 1685ab77dab4SSouptick Joarder trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); 1686ab77dab4SSouptick Joarder return ret; 168771eab6dfSJan Kara } 168871eab6dfSJan Kara 168971eab6dfSJan Kara /** 169071eab6dfSJan Kara * dax_finish_sync_fault - finish synchronous page fault 169171eab6dfSJan Kara * @vmf: The description of the fault 169271eab6dfSJan Kara * @pe_size: Size of entry to be inserted 169371eab6dfSJan Kara * @pfn: PFN to insert 169471eab6dfSJan Kara * 169571eab6dfSJan Kara * This function ensures that the file range touched by the page fault is 169671eab6dfSJan Kara * stored persistently on the media and handles inserting of appropriate page 169771eab6dfSJan Kara * table entry. 
169871eab6dfSJan Kara */ 1699ab77dab4SSouptick Joarder vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, 1700ab77dab4SSouptick Joarder enum page_entry_size pe_size, pfn_t pfn) 170171eab6dfSJan Kara { 170271eab6dfSJan Kara int err; 170371eab6dfSJan Kara loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; 170471eab6dfSJan Kara size_t len = 0; 170571eab6dfSJan Kara 170671eab6dfSJan Kara if (pe_size == PE_SIZE_PTE) 170771eab6dfSJan Kara len = PAGE_SIZE; 170871eab6dfSJan Kara else if (pe_size == PE_SIZE_PMD) 170971eab6dfSJan Kara len = PMD_SIZE; 171071eab6dfSJan Kara else 171171eab6dfSJan Kara WARN_ON_ONCE(1); 171271eab6dfSJan Kara err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); 171371eab6dfSJan Kara if (err) 171471eab6dfSJan Kara return VM_FAULT_SIGBUS; 171571eab6dfSJan Kara return dax_insert_pfn_mkwrite(vmf, pe_size, pfn); 171671eab6dfSJan Kara } 171771eab6dfSJan Kara EXPORT_SYMBOL_GPL(dax_finish_sync_fault); 1718
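/*
 * Illustrative sketch (editorial addition, not part of the fs/dax.c blame
 * listing above): how a filesystem might wire the exported DAX helpers --
 * dax_iomap_rw(), dax_iomap_fault() and dax_finish_sync_fault() -- into its
 * read and page-fault paths.  The "examplefs_" names and examplefs_iomap_ops
 * are hypothetical stand-ins for a real filesystem's iomap callbacks (ext4
 * and xfs follow this general pattern); error handling and filesystem
 * locking are trimmed to the minimum needed to satisfy the lockdep
 * assertions in dax_iomap_rw() and the MAP_SYNC contract described in the
 * kernel-doc comments above.
 */

/* Hypothetical: a real filesystem supplies ->iomap_begin/->iomap_end here. */
static const struct iomap_ops examplefs_iomap_ops;

static ssize_t examplefs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	/* dax_iomap_rw() expects i_rwsem held; shared is enough for reads. */
	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &examplefs_iomap_ops);
	inode_unlock_shared(inode);
	return ret;
}

static vm_fault_t examplefs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	pfn_t pfn;
	vm_fault_t ret;

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	/* The caller is expected to serialize against truncate/hole punch. */
	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &examplefs_iomap_ops);

	/*
	 * For a synchronous (MAP_SYNC) write fault dax_iomap_fault() defers
	 * the page table insertion and returns VM_FAULT_NEEDDSYNC together
	 * with the pfn; dax_finish_sync_fault() fsyncs the faulted range and
	 * only then installs the writeable PTE/PMD.
	 */
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);

	if (write)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static vm_fault_t examplefs_dax_fault(struct vm_fault *vmf)
{
	return examplefs_dax_huge_fault(vmf, PE_SIZE_PTE);
}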