1d475c634SMatthew Wilcox /* 2d475c634SMatthew Wilcox * fs/dax.c - Direct Access filesystem code 3d475c634SMatthew Wilcox * Copyright (c) 2013-2014 Intel Corporation 4d475c634SMatthew Wilcox * Author: Matthew Wilcox <matthew.r.wilcox@intel.com> 5d475c634SMatthew Wilcox * Author: Ross Zwisler <ross.zwisler@linux.intel.com> 6d475c634SMatthew Wilcox * 7d475c634SMatthew Wilcox * This program is free software; you can redistribute it and/or modify it 8d475c634SMatthew Wilcox * under the terms and conditions of the GNU General Public License, 9d475c634SMatthew Wilcox * version 2, as published by the Free Software Foundation. 10d475c634SMatthew Wilcox * 11d475c634SMatthew Wilcox * This program is distributed in the hope it will be useful, but WITHOUT 12d475c634SMatthew Wilcox * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13d475c634SMatthew Wilcox * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14d475c634SMatthew Wilcox * more details. 15d475c634SMatthew Wilcox */ 16d475c634SMatthew Wilcox 17d475c634SMatthew Wilcox #include <linux/atomic.h> 18d475c634SMatthew Wilcox #include <linux/blkdev.h> 19d475c634SMatthew Wilcox #include <linux/buffer_head.h> 20d77e92e2SRoss Zwisler #include <linux/dax.h> 21d475c634SMatthew Wilcox #include <linux/fs.h> 22d475c634SMatthew Wilcox #include <linux/genhd.h> 23f7ca90b1SMatthew Wilcox #include <linux/highmem.h> 24f7ca90b1SMatthew Wilcox #include <linux/memcontrol.h> 25f7ca90b1SMatthew Wilcox #include <linux/mm.h> 26d475c634SMatthew Wilcox #include <linux/mutex.h> 279973c98eSRoss Zwisler #include <linux/pagevec.h> 28289c6aedSMatthew Wilcox #include <linux/sched.h> 29f361bf4aSIngo Molnar #include <linux/sched/signal.h> 30d475c634SMatthew Wilcox #include <linux/uio.h> 31f7ca90b1SMatthew Wilcox #include <linux/vmstat.h> 3234c0fd54SDan Williams #include <linux/pfn_t.h> 330e749e54SDan Williams #include <linux/sizes.h> 344b4bb46dSJan Kara #include <linux/mmu_notifier.h> 35a254e568SChristoph Hellwig #include <linux/iomap.h> 36a254e568SChristoph Hellwig #include "internal.h" 37d475c634SMatthew Wilcox 38282a8e03SRoss Zwisler #define CREATE_TRACE_POINTS 39282a8e03SRoss Zwisler #include <trace/events/fs_dax.h> 40282a8e03SRoss Zwisler 41ac401cc7SJan Kara /* We choose 4096 entries - same as per-zone page wait tables */ 42ac401cc7SJan Kara #define DAX_WAIT_TABLE_BITS 12 43ac401cc7SJan Kara #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS) 44ac401cc7SJan Kara 45917f3452SRoss Zwisler /* The 'colour' (ie low bits) within a PMD of a page offset. */ 46917f3452SRoss Zwisler #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) 47977fbdcdSMatthew Wilcox #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT) 48917f3452SRoss Zwisler 49ce95ab0fSRoss Zwisler static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES]; 50ac401cc7SJan Kara 51ac401cc7SJan Kara static int __init init_dax_wait_table(void) 52ac401cc7SJan Kara { 53ac401cc7SJan Kara int i; 54ac401cc7SJan Kara 55ac401cc7SJan Kara for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++) 56ac401cc7SJan Kara init_waitqueue_head(wait_table + i); 57ac401cc7SJan Kara return 0; 58ac401cc7SJan Kara } 59ac401cc7SJan Kara fs_initcall(init_dax_wait_table); 60ac401cc7SJan Kara 61527b19d0SRoss Zwisler /* 623159f943SMatthew Wilcox * DAX pagecache entries use XArray value entries so they can't be mistaken 633159f943SMatthew Wilcox * for pages. 
We use one bit for locking, one bit for the entry size (PMD) 643159f943SMatthew Wilcox * and two more to tell us if the entry is a zero page or an empty entry that 653159f943SMatthew Wilcox * is just used for locking. In total four special bits. 66527b19d0SRoss Zwisler * 67527b19d0SRoss Zwisler * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE 68527b19d0SRoss Zwisler * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem 69527b19d0SRoss Zwisler * block allocation. 70527b19d0SRoss Zwisler */ 713159f943SMatthew Wilcox #define DAX_SHIFT (4) 723159f943SMatthew Wilcox #define DAX_LOCKED (1UL << 0) 733159f943SMatthew Wilcox #define DAX_PMD (1UL << 1) 743159f943SMatthew Wilcox #define DAX_ZERO_PAGE (1UL << 2) 753159f943SMatthew Wilcox #define DAX_EMPTY (1UL << 3) 76527b19d0SRoss Zwisler 773fe0791cSDan Williams static unsigned long dax_radix_pfn(void *entry) 78527b19d0SRoss Zwisler { 793159f943SMatthew Wilcox return xa_to_value(entry) >> DAX_SHIFT; 80527b19d0SRoss Zwisler } 81527b19d0SRoss Zwisler 823fe0791cSDan Williams static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags) 83527b19d0SRoss Zwisler { 843159f943SMatthew Wilcox return xa_mk_value(flags | ((unsigned long)pfn << DAX_SHIFT) | 853159f943SMatthew Wilcox DAX_LOCKED); 86527b19d0SRoss Zwisler } 87527b19d0SRoss Zwisler 88527b19d0SRoss Zwisler static unsigned int dax_radix_order(void *entry) 89527b19d0SRoss Zwisler { 903159f943SMatthew Wilcox if (xa_to_value(entry) & DAX_PMD) 91527b19d0SRoss Zwisler return PMD_SHIFT - PAGE_SHIFT; 92527b19d0SRoss Zwisler return 0; 93527b19d0SRoss Zwisler } 94527b19d0SRoss Zwisler 95642261acSRoss Zwisler static int dax_is_pmd_entry(void *entry) 96642261acSRoss Zwisler { 973159f943SMatthew Wilcox return xa_to_value(entry) & DAX_PMD; 98642261acSRoss Zwisler } 99642261acSRoss Zwisler 100642261acSRoss Zwisler static int dax_is_pte_entry(void *entry) 101642261acSRoss Zwisler { 1023159f943SMatthew Wilcox return !(xa_to_value(entry) & DAX_PMD); 103642261acSRoss Zwisler } 104642261acSRoss Zwisler 105642261acSRoss Zwisler static int dax_is_zero_entry(void *entry) 106642261acSRoss Zwisler { 1073159f943SMatthew Wilcox return xa_to_value(entry) & DAX_ZERO_PAGE; 108642261acSRoss Zwisler } 109642261acSRoss Zwisler 110642261acSRoss Zwisler static int dax_is_empty_entry(void *entry) 111642261acSRoss Zwisler { 1123159f943SMatthew Wilcox return xa_to_value(entry) & DAX_EMPTY; 113642261acSRoss Zwisler } 114642261acSRoss Zwisler 115f7ca90b1SMatthew Wilcox /* 116ac401cc7SJan Kara * DAX radix tree locking 117ac401cc7SJan Kara */ 118ac401cc7SJan Kara struct exceptional_entry_key { 119ac401cc7SJan Kara struct address_space *mapping; 12063e95b5cSRoss Zwisler pgoff_t entry_start; 121ac401cc7SJan Kara }; 122ac401cc7SJan Kara 123ac401cc7SJan Kara struct wait_exceptional_entry_queue { 124ac6424b9SIngo Molnar wait_queue_entry_t wait; 125ac401cc7SJan Kara struct exceptional_entry_key key; 126ac401cc7SJan Kara }; 127ac401cc7SJan Kara 12863e95b5cSRoss Zwisler static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping, 12963e95b5cSRoss Zwisler pgoff_t index, void *entry, struct exceptional_entry_key *key) 13063e95b5cSRoss Zwisler { 13163e95b5cSRoss Zwisler unsigned long hash; 13263e95b5cSRoss Zwisler 13363e95b5cSRoss Zwisler /* 13463e95b5cSRoss Zwisler * If 'entry' is a PMD, align the 'index' that we use for the wait 13563e95b5cSRoss Zwisler * queue to the start of that PMD. 
This ensures that all offsets in 13663e95b5cSRoss Zwisler * the range covered by the PMD map to the same bit lock. 13763e95b5cSRoss Zwisler */ 138642261acSRoss Zwisler if (dax_is_pmd_entry(entry)) 139917f3452SRoss Zwisler index &= ~PG_PMD_COLOUR; 14063e95b5cSRoss Zwisler 14163e95b5cSRoss Zwisler key->mapping = mapping; 14263e95b5cSRoss Zwisler key->entry_start = index; 14363e95b5cSRoss Zwisler 14463e95b5cSRoss Zwisler hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS); 14563e95b5cSRoss Zwisler return wait_table + hash; 14663e95b5cSRoss Zwisler } 14763e95b5cSRoss Zwisler 148ac6424b9SIngo Molnar static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode, 149ac401cc7SJan Kara int sync, void *keyp) 150ac401cc7SJan Kara { 151ac401cc7SJan Kara struct exceptional_entry_key *key = keyp; 152ac401cc7SJan Kara struct wait_exceptional_entry_queue *ewait = 153ac401cc7SJan Kara container_of(wait, struct wait_exceptional_entry_queue, wait); 154ac401cc7SJan Kara 155ac401cc7SJan Kara if (key->mapping != ewait->key.mapping || 15663e95b5cSRoss Zwisler key->entry_start != ewait->key.entry_start) 157ac401cc7SJan Kara return 0; 158ac401cc7SJan Kara return autoremove_wake_function(wait, mode, sync, NULL); 159ac401cc7SJan Kara } 160ac401cc7SJan Kara 161ac401cc7SJan Kara /* 162b93b0163SMatthew Wilcox * @entry may no longer be the entry at the index in the mapping. 163b93b0163SMatthew Wilcox * The important information it's conveying is whether the entry at 164b93b0163SMatthew Wilcox * this index used to be a PMD entry. 165e30331ffSRoss Zwisler */ 166d01ad197SRoss Zwisler static void dax_wake_mapping_entry_waiter(struct address_space *mapping, 167e30331ffSRoss Zwisler pgoff_t index, void *entry, bool wake_all) 168e30331ffSRoss Zwisler { 169e30331ffSRoss Zwisler struct exceptional_entry_key key; 170e30331ffSRoss Zwisler wait_queue_head_t *wq; 171e30331ffSRoss Zwisler 172e30331ffSRoss Zwisler wq = dax_entry_waitqueue(mapping, index, entry, &key); 173e30331ffSRoss Zwisler 174e30331ffSRoss Zwisler /* 175e30331ffSRoss Zwisler * Checking for locked entry and prepare_to_wait_exclusive() happens 176b93b0163SMatthew Wilcox * under the i_pages lock, ditto for entry handling in our callers. 177e30331ffSRoss Zwisler * So at this point all tasks that could have seen our entry locked 178e30331ffSRoss Zwisler * must be in the waitqueue and the following check will see them. 179e30331ffSRoss Zwisler */ 180e30331ffSRoss Zwisler if (waitqueue_active(wq)) 181e30331ffSRoss Zwisler __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key); 182e30331ffSRoss Zwisler } 183e30331ffSRoss Zwisler 184e30331ffSRoss Zwisler /* 185b93b0163SMatthew Wilcox * Check whether the given slot is locked. Must be called with the i_pages 186b93b0163SMatthew Wilcox * lock held. 187ac401cc7SJan Kara */ 188ac401cc7SJan Kara static inline int slot_locked(struct address_space *mapping, void **slot) 189ac401cc7SJan Kara { 1903159f943SMatthew Wilcox unsigned long entry = xa_to_value( 1913159f943SMatthew Wilcox radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock)); 1923159f943SMatthew Wilcox return entry & DAX_LOCKED; 193ac401cc7SJan Kara } 194ac401cc7SJan Kara 195ac401cc7SJan Kara /* 196b93b0163SMatthew Wilcox * Mark the given slot as locked. Must be called with the i_pages lock held. 
197ac401cc7SJan Kara */ 198ac401cc7SJan Kara static inline void *lock_slot(struct address_space *mapping, void **slot) 199ac401cc7SJan Kara { 2003159f943SMatthew Wilcox unsigned long v = xa_to_value( 2013159f943SMatthew Wilcox radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock)); 2023159f943SMatthew Wilcox void *entry = xa_mk_value(v | DAX_LOCKED); 2033159f943SMatthew Wilcox radix_tree_replace_slot(&mapping->i_pages, slot, entry); 2043159f943SMatthew Wilcox return entry; 205ac401cc7SJan Kara } 206ac401cc7SJan Kara 207ac401cc7SJan Kara /* 208b93b0163SMatthew Wilcox * Mark the given slot as unlocked. Must be called with the i_pages lock held. 209ac401cc7SJan Kara */ 210ac401cc7SJan Kara static inline void *unlock_slot(struct address_space *mapping, void **slot) 211ac401cc7SJan Kara { 2123159f943SMatthew Wilcox unsigned long v = xa_to_value( 2133159f943SMatthew Wilcox radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock)); 2143159f943SMatthew Wilcox void *entry = xa_mk_value(v & ~DAX_LOCKED); 2153159f943SMatthew Wilcox radix_tree_replace_slot(&mapping->i_pages, slot, entry); 2163159f943SMatthew Wilcox return entry; 217ac401cc7SJan Kara } 218ac401cc7SJan Kara 219ac401cc7SJan Kara /* 220ac401cc7SJan Kara * Lookup entry in radix tree, wait for it to become unlocked if it is 2213159f943SMatthew Wilcox * a DAX entry and return it. The caller must call 222ac401cc7SJan Kara * put_unlocked_mapping_entry() when he decided not to lock the entry or 223ac401cc7SJan Kara * put_locked_mapping_entry() when he locked the entry and now wants to 224ac401cc7SJan Kara * unlock it. 225ac401cc7SJan Kara * 226b93b0163SMatthew Wilcox * Must be called with the i_pages lock held. 227ac401cc7SJan Kara */ 228c2a7d2a1SDan Williams static void *__get_unlocked_mapping_entry(struct address_space *mapping, 229c2a7d2a1SDan Williams pgoff_t index, void ***slotp, bool (*wait_fn)(void)) 230ac401cc7SJan Kara { 231e3ad61c6SRoss Zwisler void *entry, **slot; 232ac401cc7SJan Kara struct wait_exceptional_entry_queue ewait; 23363e95b5cSRoss Zwisler wait_queue_head_t *wq; 234ac401cc7SJan Kara 235ac401cc7SJan Kara init_wait(&ewait.wait); 236ac401cc7SJan Kara ewait.wait.func = wake_exceptional_entry_func; 237ac401cc7SJan Kara 238ac401cc7SJan Kara for (;;) { 239c2a7d2a1SDan Williams bool revalidate; 240c2a7d2a1SDan Williams 241b93b0163SMatthew Wilcox entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, 242ac401cc7SJan Kara &slot); 24391d25ba8SRoss Zwisler if (!entry || 2443159f943SMatthew Wilcox WARN_ON_ONCE(!xa_is_value(entry)) || 245ac401cc7SJan Kara !slot_locked(mapping, slot)) { 246ac401cc7SJan Kara if (slotp) 247ac401cc7SJan Kara *slotp = slot; 248e3ad61c6SRoss Zwisler return entry; 249ac401cc7SJan Kara } 25063e95b5cSRoss Zwisler 25163e95b5cSRoss Zwisler wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key); 252ac401cc7SJan Kara prepare_to_wait_exclusive(wq, &ewait.wait, 253ac401cc7SJan Kara TASK_UNINTERRUPTIBLE); 254b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 255c2a7d2a1SDan Williams revalidate = wait_fn(); 256ac401cc7SJan Kara finish_wait(wq, &ewait.wait); 257b93b0163SMatthew Wilcox xa_lock_irq(&mapping->i_pages); 258c2a7d2a1SDan Williams if (revalidate) 259c2a7d2a1SDan Williams return ERR_PTR(-EAGAIN); 260ac401cc7SJan Kara } 261ac401cc7SJan Kara } 262ac401cc7SJan Kara 263c2a7d2a1SDan Williams static bool entry_wait(void) 264c2a7d2a1SDan Williams { 265c2a7d2a1SDan Williams schedule(); 266c2a7d2a1SDan Williams /* 267c2a7d2a1SDan Williams * Never return an ERR_PTR() from 
268c2a7d2a1SDan Williams * __get_unlocked_mapping_entry(), just keep looping. 269c2a7d2a1SDan Williams */ 270c2a7d2a1SDan Williams return false; 271c2a7d2a1SDan Williams } 272c2a7d2a1SDan Williams 273c2a7d2a1SDan Williams static void *get_unlocked_mapping_entry(struct address_space *mapping, 274c2a7d2a1SDan Williams pgoff_t index, void ***slotp) 275c2a7d2a1SDan Williams { 276c2a7d2a1SDan Williams return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait); 277c2a7d2a1SDan Williams } 278c2a7d2a1SDan Williams 279c2a7d2a1SDan Williams static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index) 280b1aa812bSJan Kara { 281b1aa812bSJan Kara void *entry, **slot; 282b1aa812bSJan Kara 283b93b0163SMatthew Wilcox xa_lock_irq(&mapping->i_pages); 284b93b0163SMatthew Wilcox entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot); 2853159f943SMatthew Wilcox if (WARN_ON_ONCE(!entry || !xa_is_value(entry) || 286b1aa812bSJan Kara !slot_locked(mapping, slot))) { 287b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 288b1aa812bSJan Kara return; 289b1aa812bSJan Kara } 290b1aa812bSJan Kara unlock_slot(mapping, slot); 291b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 292b1aa812bSJan Kara dax_wake_mapping_entry_waiter(mapping, index, entry, false); 293b1aa812bSJan Kara } 294b1aa812bSJan Kara 295ac401cc7SJan Kara static void put_locked_mapping_entry(struct address_space *mapping, 29691d25ba8SRoss Zwisler pgoff_t index) 297ac401cc7SJan Kara { 298c2a7d2a1SDan Williams unlock_mapping_entry(mapping, index); 299ac401cc7SJan Kara } 300ac401cc7SJan Kara 301ac401cc7SJan Kara /* 302ac401cc7SJan Kara * Called when we are done with radix tree entry we looked up via 303ac401cc7SJan Kara * get_unlocked_mapping_entry() and which we didn't lock in the end. 304ac401cc7SJan Kara */ 305ac401cc7SJan Kara static void put_unlocked_mapping_entry(struct address_space *mapping, 306ac401cc7SJan Kara pgoff_t index, void *entry) 307ac401cc7SJan Kara { 30891d25ba8SRoss Zwisler if (!entry) 309ac401cc7SJan Kara return; 310ac401cc7SJan Kara 311ac401cc7SJan Kara /* We have to wake up next waiter for the radix tree entry lock */ 312422476c4SRoss Zwisler dax_wake_mapping_entry_waiter(mapping, index, entry, false); 313422476c4SRoss Zwisler } 314422476c4SRoss Zwisler 315d2c997c0SDan Williams static unsigned long dax_entry_size(void *entry) 316d2c997c0SDan Williams { 317d2c997c0SDan Williams if (dax_is_zero_entry(entry)) 318d2c997c0SDan Williams return 0; 319d2c997c0SDan Williams else if (dax_is_empty_entry(entry)) 320d2c997c0SDan Williams return 0; 321d2c997c0SDan Williams else if (dax_is_pmd_entry(entry)) 322d2c997c0SDan Williams return PMD_SIZE; 323d2c997c0SDan Williams else 324d2c997c0SDan Williams return PAGE_SIZE; 325d2c997c0SDan Williams } 326d2c997c0SDan Williams 327d2c997c0SDan Williams static unsigned long dax_radix_end_pfn(void *entry) 328d2c997c0SDan Williams { 329d2c997c0SDan Williams return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE; 330d2c997c0SDan Williams } 331d2c997c0SDan Williams 332d2c997c0SDan Williams /* 333d2c997c0SDan Williams * Iterate through all mapped pfns represented by an entry, i.e. skip 334d2c997c0SDan Williams * 'empty' and 'zero' entries. 
335d2c997c0SDan Williams */ 336d2c997c0SDan Williams #define for_each_mapped_pfn(entry, pfn) \ 337d2c997c0SDan Williams for (pfn = dax_radix_pfn(entry); \ 338d2c997c0SDan Williams pfn < dax_radix_end_pfn(entry); pfn++) 339d2c997c0SDan Williams 34073449dafSDan Williams /* 34173449dafSDan Williams * TODO: for reflink+dax we need a way to associate a single page with 34273449dafSDan Williams * multiple address_space instances at different linear_page_index() 34373449dafSDan Williams * offsets. 34473449dafSDan Williams */ 34573449dafSDan Williams static void dax_associate_entry(void *entry, struct address_space *mapping, 34673449dafSDan Williams struct vm_area_struct *vma, unsigned long address) 347d2c997c0SDan Williams { 34873449dafSDan Williams unsigned long size = dax_entry_size(entry), pfn, index; 34973449dafSDan Williams int i = 0; 350d2c997c0SDan Williams 351d2c997c0SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 352d2c997c0SDan Williams return; 353d2c997c0SDan Williams 35473449dafSDan Williams index = linear_page_index(vma, address & ~(size - 1)); 355d2c997c0SDan Williams for_each_mapped_pfn(entry, pfn) { 356d2c997c0SDan Williams struct page *page = pfn_to_page(pfn); 357d2c997c0SDan Williams 358d2c997c0SDan Williams WARN_ON_ONCE(page->mapping); 359d2c997c0SDan Williams page->mapping = mapping; 36073449dafSDan Williams page->index = index + i++; 361d2c997c0SDan Williams } 362d2c997c0SDan Williams } 363d2c997c0SDan Williams 364d2c997c0SDan Williams static void dax_disassociate_entry(void *entry, struct address_space *mapping, 365d2c997c0SDan Williams bool trunc) 366d2c997c0SDan Williams { 367d2c997c0SDan Williams unsigned long pfn; 368d2c997c0SDan Williams 369d2c997c0SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 370d2c997c0SDan Williams return; 371d2c997c0SDan Williams 372d2c997c0SDan Williams for_each_mapped_pfn(entry, pfn) { 373d2c997c0SDan Williams struct page *page = pfn_to_page(pfn); 374d2c997c0SDan Williams 375d2c997c0SDan Williams WARN_ON_ONCE(trunc && page_ref_count(page) > 1); 376d2c997c0SDan Williams WARN_ON_ONCE(page->mapping && page->mapping != mapping); 377d2c997c0SDan Williams page->mapping = NULL; 37873449dafSDan Williams page->index = 0; 379d2c997c0SDan Williams } 380d2c997c0SDan Williams } 381d2c997c0SDan Williams 3825fac7408SDan Williams static struct page *dax_busy_page(void *entry) 3835fac7408SDan Williams { 3845fac7408SDan Williams unsigned long pfn; 3855fac7408SDan Williams 3865fac7408SDan Williams for_each_mapped_pfn(entry, pfn) { 3875fac7408SDan Williams struct page *page = pfn_to_page(pfn); 3885fac7408SDan Williams 3895fac7408SDan Williams if (page_ref_count(page) > 1) 3905fac7408SDan Williams return page; 3915fac7408SDan Williams } 3925fac7408SDan Williams return NULL; 3935fac7408SDan Williams } 3945fac7408SDan Williams 395c2a7d2a1SDan Williams static bool entry_wait_revalidate(void) 396c2a7d2a1SDan Williams { 397c2a7d2a1SDan Williams rcu_read_unlock(); 398c2a7d2a1SDan Williams schedule(); 399c2a7d2a1SDan Williams rcu_read_lock(); 400c2a7d2a1SDan Williams 401c2a7d2a1SDan Williams /* 402c2a7d2a1SDan Williams * Tell __get_unlocked_mapping_entry() to take a break, we need 403c2a7d2a1SDan Williams * to revalidate page->mapping after dropping locks 404c2a7d2a1SDan Williams */ 405c2a7d2a1SDan Williams return true; 406c2a7d2a1SDan Williams } 407c2a7d2a1SDan Williams 408c2a7d2a1SDan Williams bool dax_lock_mapping_entry(struct page *page) 409c2a7d2a1SDan Williams { 410c2a7d2a1SDan Williams pgoff_t index; 411c2a7d2a1SDan Williams struct inode *inode; 
412c2a7d2a1SDan Williams bool did_lock = false; 413c2a7d2a1SDan Williams void *entry = NULL, **slot; 414c2a7d2a1SDan Williams struct address_space *mapping; 415c2a7d2a1SDan Williams 416c2a7d2a1SDan Williams rcu_read_lock(); 417c2a7d2a1SDan Williams for (;;) { 418c2a7d2a1SDan Williams mapping = READ_ONCE(page->mapping); 419c2a7d2a1SDan Williams 420c2a7d2a1SDan Williams if (!dax_mapping(mapping)) 421c2a7d2a1SDan Williams break; 422c2a7d2a1SDan Williams 423c2a7d2a1SDan Williams /* 424c2a7d2a1SDan Williams * In the device-dax case there's no need to lock, a 425c2a7d2a1SDan Williams * struct dev_pagemap pin is sufficient to keep the 426c2a7d2a1SDan Williams * inode alive, and we assume we have dev_pagemap pin 427c2a7d2a1SDan Williams * otherwise we would not have a valid pfn_to_page() 428c2a7d2a1SDan Williams * translation. 429c2a7d2a1SDan Williams */ 430c2a7d2a1SDan Williams inode = mapping->host; 431c2a7d2a1SDan Williams if (S_ISCHR(inode->i_mode)) { 432c2a7d2a1SDan Williams did_lock = true; 433c2a7d2a1SDan Williams break; 434c2a7d2a1SDan Williams } 435c2a7d2a1SDan Williams 436c2a7d2a1SDan Williams xa_lock_irq(&mapping->i_pages); 437c2a7d2a1SDan Williams if (mapping != page->mapping) { 438c2a7d2a1SDan Williams xa_unlock_irq(&mapping->i_pages); 439c2a7d2a1SDan Williams continue; 440c2a7d2a1SDan Williams } 441c2a7d2a1SDan Williams index = page->index; 442c2a7d2a1SDan Williams 443c2a7d2a1SDan Williams entry = __get_unlocked_mapping_entry(mapping, index, &slot, 444c2a7d2a1SDan Williams entry_wait_revalidate); 445c2a7d2a1SDan Williams if (!entry) { 446c2a7d2a1SDan Williams xa_unlock_irq(&mapping->i_pages); 447c2a7d2a1SDan Williams break; 448c2a7d2a1SDan Williams } else if (IS_ERR(entry)) { 449c2a7d2a1SDan Williams WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN); 450c2a7d2a1SDan Williams continue; 451c2a7d2a1SDan Williams } 452c2a7d2a1SDan Williams lock_slot(mapping, slot); 453c2a7d2a1SDan Williams did_lock = true; 454c2a7d2a1SDan Williams xa_unlock_irq(&mapping->i_pages); 455c2a7d2a1SDan Williams break; 456c2a7d2a1SDan Williams } 457c2a7d2a1SDan Williams rcu_read_unlock(); 458c2a7d2a1SDan Williams 459c2a7d2a1SDan Williams return did_lock; 460c2a7d2a1SDan Williams } 461c2a7d2a1SDan Williams 462c2a7d2a1SDan Williams void dax_unlock_mapping_entry(struct page *page) 463c2a7d2a1SDan Williams { 464c2a7d2a1SDan Williams struct address_space *mapping = page->mapping; 465c2a7d2a1SDan Williams struct inode *inode = mapping->host; 466c2a7d2a1SDan Williams 467c2a7d2a1SDan Williams if (S_ISCHR(inode->i_mode)) 468c2a7d2a1SDan Williams return; 469c2a7d2a1SDan Williams 470c2a7d2a1SDan Williams unlock_mapping_entry(mapping, page->index); 471c2a7d2a1SDan Williams } 472c2a7d2a1SDan Williams 473ac401cc7SJan Kara /* 4743159f943SMatthew Wilcox * Find radix tree entry at given index. If it is a DAX entry, return it 4753159f943SMatthew Wilcox * with the radix tree entry locked. If the radix tree doesn't contain the 4763159f943SMatthew Wilcox * given index, create an empty entry for the index and return with it locked. 477ac401cc7SJan Kara * 4783159f943SMatthew Wilcox * When requesting an entry with size DAX_PMD, grab_mapping_entry() will 479642261acSRoss Zwisler * either return that locked entry or will return an error. This error will 48091d25ba8SRoss Zwisler * happen if there are any 4k entries within the 2MiB range that we are 48191d25ba8SRoss Zwisler * requesting. 482642261acSRoss Zwisler * 483642261acSRoss Zwisler * We always favor 4k entries over 2MiB entries. 
There isn't a flow where we 484642261acSRoss Zwisler * evict 4k entries in order to 'upgrade' them to a 2MiB entry. A 2MiB 485642261acSRoss Zwisler * insertion will fail if it finds any 4k entries already in the tree, and a 486642261acSRoss Zwisler * 4k insertion will cause an existing 2MiB entry to be unmapped and 487642261acSRoss Zwisler * downgraded to 4k entries. This happens for both 2MiB huge zero pages as 488642261acSRoss Zwisler * well as 2MiB empty entries. 489642261acSRoss Zwisler * 490642261acSRoss Zwisler * The exception to this downgrade path is for 2MiB DAX PMD entries that have 491642261acSRoss Zwisler * real storage backing them. We will leave these real 2MiB DAX entries in 492642261acSRoss Zwisler * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry. 493642261acSRoss Zwisler * 494ac401cc7SJan Kara * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For 495ac401cc7SJan Kara * persistent memory the benefit is doubtful. We can add that later if we can 496ac401cc7SJan Kara * show it helps. 497ac401cc7SJan Kara */ 498642261acSRoss Zwisler static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index, 499642261acSRoss Zwisler unsigned long size_flag) 500ac401cc7SJan Kara { 501642261acSRoss Zwisler bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */ 502e3ad61c6SRoss Zwisler void *entry, **slot; 503ac401cc7SJan Kara 504ac401cc7SJan Kara restart: 505b93b0163SMatthew Wilcox xa_lock_irq(&mapping->i_pages); 506e3ad61c6SRoss Zwisler entry = get_unlocked_mapping_entry(mapping, index, &slot); 507642261acSRoss Zwisler 5083159f943SMatthew Wilcox if (WARN_ON_ONCE(entry && !xa_is_value(entry))) { 50991d25ba8SRoss Zwisler entry = ERR_PTR(-EIO); 51091d25ba8SRoss Zwisler goto out_unlock; 51191d25ba8SRoss Zwisler } 51291d25ba8SRoss Zwisler 513642261acSRoss Zwisler if (entry) { 5143159f943SMatthew Wilcox if (size_flag & DAX_PMD) { 51591d25ba8SRoss Zwisler if (dax_is_pte_entry(entry)) { 516642261acSRoss Zwisler put_unlocked_mapping_entry(mapping, index, 517642261acSRoss Zwisler entry); 518642261acSRoss Zwisler entry = ERR_PTR(-EEXIST); 519642261acSRoss Zwisler goto out_unlock; 520642261acSRoss Zwisler } 521642261acSRoss Zwisler } else { /* trying to grab a PTE entry */ 52291d25ba8SRoss Zwisler if (dax_is_pmd_entry(entry) && 523642261acSRoss Zwisler (dax_is_zero_entry(entry) || 524642261acSRoss Zwisler dax_is_empty_entry(entry))) { 525642261acSRoss Zwisler pmd_downgrade = true; 526642261acSRoss Zwisler } 527642261acSRoss Zwisler } 528642261acSRoss Zwisler } 529642261acSRoss Zwisler 530ac401cc7SJan Kara /* No entry for given index? Make sure radix tree is big enough. */ 531642261acSRoss Zwisler if (!entry || pmd_downgrade) { 532ac401cc7SJan Kara int err; 533ac401cc7SJan Kara 534642261acSRoss Zwisler if (pmd_downgrade) { 535642261acSRoss Zwisler /* 536642261acSRoss Zwisler * Make sure 'entry' remains valid while we drop 537b93b0163SMatthew Wilcox * the i_pages lock. 538642261acSRoss Zwisler */ 539642261acSRoss Zwisler entry = lock_slot(mapping, slot); 540642261acSRoss Zwisler } 541642261acSRoss Zwisler 542b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 543642261acSRoss Zwisler /* 544642261acSRoss Zwisler * Besides huge zero pages the only other thing that gets 545642261acSRoss Zwisler * downgraded are empty entries which don't need to be 546642261acSRoss Zwisler * unmapped. 
547642261acSRoss Zwisler */ 548642261acSRoss Zwisler if (pmd_downgrade && dax_is_zero_entry(entry)) 549977fbdcdSMatthew Wilcox unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, 550977fbdcdSMatthew Wilcox PG_PMD_NR, false); 551642261acSRoss Zwisler 5520cb80b48SJan Kara err = radix_tree_preload( 5530cb80b48SJan Kara mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM); 5540cb80b48SJan Kara if (err) { 5550cb80b48SJan Kara if (pmd_downgrade) 55691d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, index); 5570cb80b48SJan Kara return ERR_PTR(err); 5580cb80b48SJan Kara } 559b93b0163SMatthew Wilcox xa_lock_irq(&mapping->i_pages); 560642261acSRoss Zwisler 561e11f8b7bSRoss Zwisler if (!entry) { 562e11f8b7bSRoss Zwisler /* 563b93b0163SMatthew Wilcox * We needed to drop the i_pages lock while calling 564e11f8b7bSRoss Zwisler * radix_tree_preload() and we didn't have an entry to 565e11f8b7bSRoss Zwisler * lock. See if another thread inserted an entry at 566e11f8b7bSRoss Zwisler * our index during this time. 567e11f8b7bSRoss Zwisler */ 568b93b0163SMatthew Wilcox entry = __radix_tree_lookup(&mapping->i_pages, index, 569e11f8b7bSRoss Zwisler NULL, &slot); 570e11f8b7bSRoss Zwisler if (entry) { 571e11f8b7bSRoss Zwisler radix_tree_preload_end(); 572b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 573e11f8b7bSRoss Zwisler goto restart; 574e11f8b7bSRoss Zwisler } 575e11f8b7bSRoss Zwisler } 576e11f8b7bSRoss Zwisler 577642261acSRoss Zwisler if (pmd_downgrade) { 578d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, false); 579b93b0163SMatthew Wilcox radix_tree_delete(&mapping->i_pages, index); 580642261acSRoss Zwisler mapping->nrexceptional--; 581642261acSRoss Zwisler dax_wake_mapping_entry_waiter(mapping, index, entry, 582642261acSRoss Zwisler true); 583642261acSRoss Zwisler } 584642261acSRoss Zwisler 5853159f943SMatthew Wilcox entry = dax_radix_locked_entry(0, size_flag | DAX_EMPTY); 586642261acSRoss Zwisler 587b93b0163SMatthew Wilcox err = __radix_tree_insert(&mapping->i_pages, index, 588642261acSRoss Zwisler dax_radix_order(entry), entry); 589ac401cc7SJan Kara radix_tree_preload_end(); 590ac401cc7SJan Kara if (err) { 591b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 592642261acSRoss Zwisler /* 593e11f8b7bSRoss Zwisler * Our insertion of a DAX entry failed, most likely 594e11f8b7bSRoss Zwisler * because we were inserting a PMD entry and it 595e11f8b7bSRoss Zwisler * collided with a PTE sized entry at a different 596e11f8b7bSRoss Zwisler * index in the PMD range. We haven't inserted 597e11f8b7bSRoss Zwisler * anything into the radix tree and have no waiters to 598e11f8b7bSRoss Zwisler * wake. 599642261acSRoss Zwisler */ 600ac401cc7SJan Kara return ERR_PTR(err); 601ac401cc7SJan Kara } 602ac401cc7SJan Kara /* Good, we have inserted empty locked entry into the tree. */ 603ac401cc7SJan Kara mapping->nrexceptional++; 604b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 605e3ad61c6SRoss Zwisler return entry; 606ac401cc7SJan Kara } 607e3ad61c6SRoss Zwisler entry = lock_slot(mapping, slot); 608642261acSRoss Zwisler out_unlock: 609b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 610e3ad61c6SRoss Zwisler return entry; 611ac401cc7SJan Kara } 612ac401cc7SJan Kara 6135fac7408SDan Williams /** 6145fac7408SDan Williams * dax_layout_busy_page - find first pinned page in @mapping 6155fac7408SDan Williams * @mapping: address space to scan for a page with ref count > 1 6165fac7408SDan Williams * 6175fac7408SDan Williams * DAX requires ZONE_DEVICE mapped pages. 
These pages are never 6185fac7408SDan Williams * 'onlined' to the page allocator so they are considered idle when 6195fac7408SDan Williams * page->count == 1. A filesystem uses this interface to determine if 6205fac7408SDan Williams * any page in the mapping is busy, i.e. for DMA, or other 6215fac7408SDan Williams * get_user_pages() usages. 6225fac7408SDan Williams * 6235fac7408SDan Williams * It is expected that the filesystem is holding locks to block the 6245fac7408SDan Williams * establishment of new mappings in this address_space. I.e. it expects 6255fac7408SDan Williams * to be able to run unmap_mapping_range() and subsequently not race 6265fac7408SDan Williams * mapping_mapped() becoming true. 6275fac7408SDan Williams */ 6285fac7408SDan Williams struct page *dax_layout_busy_page(struct address_space *mapping) 6295fac7408SDan Williams { 6305fac7408SDan Williams pgoff_t indices[PAGEVEC_SIZE]; 6315fac7408SDan Williams struct page *page = NULL; 6325fac7408SDan Williams struct pagevec pvec; 6335fac7408SDan Williams pgoff_t index, end; 6345fac7408SDan Williams unsigned i; 6355fac7408SDan Williams 6365fac7408SDan Williams /* 6375fac7408SDan Williams * In the 'limited' case get_user_pages() for dax is disabled. 6385fac7408SDan Williams */ 6395fac7408SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 6405fac7408SDan Williams return NULL; 6415fac7408SDan Williams 6425fac7408SDan Williams if (!dax_mapping(mapping) || !mapping_mapped(mapping)) 6435fac7408SDan Williams return NULL; 6445fac7408SDan Williams 6455fac7408SDan Williams pagevec_init(&pvec); 6465fac7408SDan Williams index = 0; 6475fac7408SDan Williams end = -1; 6485fac7408SDan Williams 6495fac7408SDan Williams /* 6505fac7408SDan Williams * If we race get_user_pages_fast() here either we'll see the 6515fac7408SDan Williams * elevated page count in the pagevec_lookup and wait, or 6525fac7408SDan Williams * get_user_pages_fast() will see that the page it took a reference 6535fac7408SDan Williams * against is no longer mapped in the page tables and bail to the 6545fac7408SDan Williams * get_user_pages() slow path. The slow path is protected by 6555fac7408SDan Williams * pte_lock() and pmd_lock(). New references are not taken without 6565fac7408SDan Williams * holding those locks, and unmap_mapping_range() will not zero the 6575fac7408SDan Williams * pte or pmd without holding the respective lock, so we are 6585fac7408SDan Williams * guaranteed to either see new references or prevent new 6595fac7408SDan Williams * references from being established. 
6605fac7408SDan Williams */ 6615fac7408SDan Williams unmap_mapping_range(mapping, 0, 0, 1); 6625fac7408SDan Williams 6635fac7408SDan Williams while (index < end && pagevec_lookup_entries(&pvec, mapping, index, 6645fac7408SDan Williams min(end - index, (pgoff_t)PAGEVEC_SIZE), 6655fac7408SDan Williams indices)) { 6665fac7408SDan Williams for (i = 0; i < pagevec_count(&pvec); i++) { 6675fac7408SDan Williams struct page *pvec_ent = pvec.pages[i]; 6685fac7408SDan Williams void *entry; 6695fac7408SDan Williams 6705fac7408SDan Williams index = indices[i]; 6715fac7408SDan Williams if (index >= end) 6725fac7408SDan Williams break; 6735fac7408SDan Williams 6743159f943SMatthew Wilcox if (WARN_ON_ONCE(!xa_is_value(pvec_ent))) 6755fac7408SDan Williams continue; 6765fac7408SDan Williams 6775fac7408SDan Williams xa_lock_irq(&mapping->i_pages); 6785fac7408SDan Williams entry = get_unlocked_mapping_entry(mapping, index, NULL); 6795fac7408SDan Williams if (entry) 6805fac7408SDan Williams page = dax_busy_page(entry); 6815fac7408SDan Williams put_unlocked_mapping_entry(mapping, index, entry); 6825fac7408SDan Williams xa_unlock_irq(&mapping->i_pages); 6835fac7408SDan Williams if (page) 6845fac7408SDan Williams break; 6855fac7408SDan Williams } 686cdbf8897SRoss Zwisler 687cdbf8897SRoss Zwisler /* 688cdbf8897SRoss Zwisler * We don't expect normal struct page entries to exist in our 689cdbf8897SRoss Zwisler * tree, but we keep these pagevec calls so that this code is 690cdbf8897SRoss Zwisler * consistent with the common pattern for handling pagevecs 691cdbf8897SRoss Zwisler * throughout the kernel. 692cdbf8897SRoss Zwisler */ 6935fac7408SDan Williams pagevec_remove_exceptionals(&pvec); 6945fac7408SDan Williams pagevec_release(&pvec); 6955fac7408SDan Williams index++; 6965fac7408SDan Williams 6975fac7408SDan Williams if (page) 6985fac7408SDan Williams break; 6995fac7408SDan Williams } 7005fac7408SDan Williams return page; 7015fac7408SDan Williams } 7025fac7408SDan Williams EXPORT_SYMBOL_GPL(dax_layout_busy_page); 7035fac7408SDan Williams 704c6dcf52cSJan Kara static int __dax_invalidate_mapping_entry(struct address_space *mapping, 705c6dcf52cSJan Kara pgoff_t index, bool trunc) 706c6dcf52cSJan Kara { 707c6dcf52cSJan Kara int ret = 0; 708c6dcf52cSJan Kara void *entry; 709b93b0163SMatthew Wilcox struct radix_tree_root *pages = &mapping->i_pages; 710c6dcf52cSJan Kara 711b93b0163SMatthew Wilcox xa_lock_irq(pages); 712c6dcf52cSJan Kara entry = get_unlocked_mapping_entry(mapping, index, NULL); 7133159f943SMatthew Wilcox if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) 714c6dcf52cSJan Kara goto out; 715c6dcf52cSJan Kara if (!trunc && 716b93b0163SMatthew Wilcox (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) || 717b93b0163SMatthew Wilcox radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))) 718c6dcf52cSJan Kara goto out; 719d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, trunc); 720b93b0163SMatthew Wilcox radix_tree_delete(pages, index); 721c6dcf52cSJan Kara mapping->nrexceptional--; 722c6dcf52cSJan Kara ret = 1; 723c6dcf52cSJan Kara out: 724c6dcf52cSJan Kara put_unlocked_mapping_entry(mapping, index, entry); 725b93b0163SMatthew Wilcox xa_unlock_irq(pages); 726c6dcf52cSJan Kara return ret; 727c6dcf52cSJan Kara } 728ac401cc7SJan Kara /* 7293159f943SMatthew Wilcox * Delete DAX entry at @index from @mapping. Wait for it 7303159f943SMatthew Wilcox * to be unlocked before deleting it. 
731ac401cc7SJan Kara */ 732ac401cc7SJan Kara int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) 733ac401cc7SJan Kara { 734c6dcf52cSJan Kara int ret = __dax_invalidate_mapping_entry(mapping, index, true); 735ac401cc7SJan Kara 736ac401cc7SJan Kara /* 737ac401cc7SJan Kara * This gets called from truncate / punch_hole path. As such, the caller 738ac401cc7SJan Kara * must hold locks protecting against concurrent modifications of the 739ac401cc7SJan Kara * radix tree (usually fs-private i_mmap_sem for writing). Since the 7403159f943SMatthew Wilcox * caller has seen a DAX entry for this index, we better find it 741ac401cc7SJan Kara * at that index as well... 742ac401cc7SJan Kara */ 743c6dcf52cSJan Kara WARN_ON_ONCE(!ret); 744c6dcf52cSJan Kara return ret; 745ac401cc7SJan Kara } 746ac401cc7SJan Kara 747c6dcf52cSJan Kara /* 7483159f943SMatthew Wilcox * Invalidate DAX entry if it is clean. 749c6dcf52cSJan Kara */ 750c6dcf52cSJan Kara int dax_invalidate_mapping_entry_sync(struct address_space *mapping, 751c6dcf52cSJan Kara pgoff_t index) 752c6dcf52cSJan Kara { 753c6dcf52cSJan Kara return __dax_invalidate_mapping_entry(mapping, index, false); 754ac401cc7SJan Kara } 755ac401cc7SJan Kara 756cccbce67SDan Williams static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev, 757cccbce67SDan Williams sector_t sector, size_t size, struct page *to, 758cccbce67SDan Williams unsigned long vaddr) 759f7ca90b1SMatthew Wilcox { 760cccbce67SDan Williams void *vto, *kaddr; 761cccbce67SDan Williams pgoff_t pgoff; 762cccbce67SDan Williams long rc; 763cccbce67SDan Williams int id; 764e2e05394SRoss Zwisler 765cccbce67SDan Williams rc = bdev_dax_pgoff(bdev, sector, size, &pgoff); 766cccbce67SDan Williams if (rc) 767cccbce67SDan Williams return rc; 768cccbce67SDan Williams 769cccbce67SDan Williams id = dax_read_lock(); 77086ed913bSHuaisheng Ye rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL); 771cccbce67SDan Williams if (rc < 0) { 772cccbce67SDan Williams dax_read_unlock(id); 773cccbce67SDan Williams return rc; 774cccbce67SDan Williams } 775f7ca90b1SMatthew Wilcox vto = kmap_atomic(to); 776cccbce67SDan Williams copy_user_page(vto, (void __force *)kaddr, vaddr, to); 777f7ca90b1SMatthew Wilcox kunmap_atomic(vto); 778cccbce67SDan Williams dax_read_unlock(id); 779f7ca90b1SMatthew Wilcox return 0; 780f7ca90b1SMatthew Wilcox } 781f7ca90b1SMatthew Wilcox 782642261acSRoss Zwisler /* 783642261acSRoss Zwisler * By this point grab_mapping_entry() has ensured that we have a locked entry 784642261acSRoss Zwisler * of the appropriate size so we don't have to worry about downgrading PMDs to 785642261acSRoss Zwisler * PTEs. If we happen to be trying to insert a PTE and there is a PMD 786642261acSRoss Zwisler * already in the tree, we will skip the insertion and just dirty the PMD as 787642261acSRoss Zwisler * appropriate. 
788642261acSRoss Zwisler */ 789ac401cc7SJan Kara static void *dax_insert_mapping_entry(struct address_space *mapping, 790ac401cc7SJan Kara struct vm_fault *vmf, 7913fe0791cSDan Williams void *entry, pfn_t pfn_t, 792f5b7b748SJan Kara unsigned long flags, bool dirty) 7939973c98eSRoss Zwisler { 794b93b0163SMatthew Wilcox struct radix_tree_root *pages = &mapping->i_pages; 7953fe0791cSDan Williams unsigned long pfn = pfn_t_to_pfn(pfn_t); 796ac401cc7SJan Kara pgoff_t index = vmf->pgoff; 7973fe0791cSDan Williams void *new_entry; 7989973c98eSRoss Zwisler 799f5b7b748SJan Kara if (dirty) 8009973c98eSRoss Zwisler __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 8019973c98eSRoss Zwisler 8023159f943SMatthew Wilcox if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) { 80391d25ba8SRoss Zwisler /* we are replacing a zero page with block mapping */ 80491d25ba8SRoss Zwisler if (dax_is_pmd_entry(entry)) 805977fbdcdSMatthew Wilcox unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, 806977fbdcdSMatthew Wilcox PG_PMD_NR, false); 80791d25ba8SRoss Zwisler else /* pte entry */ 808977fbdcdSMatthew Wilcox unmap_mapping_pages(mapping, vmf->pgoff, 1, false); 809ac401cc7SJan Kara } 8109973c98eSRoss Zwisler 811b93b0163SMatthew Wilcox xa_lock_irq(pages); 8123fe0791cSDan Williams new_entry = dax_radix_locked_entry(pfn, flags); 813d2c997c0SDan Williams if (dax_entry_size(entry) != dax_entry_size(new_entry)) { 814d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, false); 81573449dafSDan Williams dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); 816d2c997c0SDan Williams } 817642261acSRoss Zwisler 81891d25ba8SRoss Zwisler if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { 819642261acSRoss Zwisler /* 820642261acSRoss Zwisler * Only swap our new entry into the radix tree if the current 821642261acSRoss Zwisler * entry is a zero page or an empty entry. If a normal PTE or 822642261acSRoss Zwisler * PMD entry is already in the tree, we leave it alone. This 823642261acSRoss Zwisler * means that if we are trying to insert a PTE and the 824642261acSRoss Zwisler * existing entry is a PMD, we will just leave the PMD in the 825642261acSRoss Zwisler * tree and dirty it if necessary. 
826642261acSRoss Zwisler */ 827f7942430SJohannes Weiner struct radix_tree_node *node; 828ac401cc7SJan Kara void **slot; 829ac401cc7SJan Kara void *ret; 830ac401cc7SJan Kara 831b93b0163SMatthew Wilcox ret = __radix_tree_lookup(pages, index, &node, &slot); 832ac401cc7SJan Kara WARN_ON_ONCE(ret != entry); 833b93b0163SMatthew Wilcox __radix_tree_replace(pages, node, slot, 834c7df8ad2SMel Gorman new_entry, NULL); 83591d25ba8SRoss Zwisler entry = new_entry; 836ac401cc7SJan Kara } 83791d25ba8SRoss Zwisler 838f5b7b748SJan Kara if (dirty) 839b93b0163SMatthew Wilcox radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY); 84091d25ba8SRoss Zwisler 841b93b0163SMatthew Wilcox xa_unlock_irq(pages); 84291d25ba8SRoss Zwisler return entry; 8439973c98eSRoss Zwisler } 8449973c98eSRoss Zwisler 8454b4bb46dSJan Kara static inline unsigned long 8464b4bb46dSJan Kara pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma) 8474b4bb46dSJan Kara { 8484b4bb46dSJan Kara unsigned long address; 8494b4bb46dSJan Kara 8504b4bb46dSJan Kara address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 8514b4bb46dSJan Kara VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); 8524b4bb46dSJan Kara return address; 8534b4bb46dSJan Kara } 8544b4bb46dSJan Kara 8554b4bb46dSJan Kara /* Walk all mappings of a given index of a file and writeprotect them */ 8564b4bb46dSJan Kara static void dax_mapping_entry_mkclean(struct address_space *mapping, 8574b4bb46dSJan Kara pgoff_t index, unsigned long pfn) 8584b4bb46dSJan Kara { 8594b4bb46dSJan Kara struct vm_area_struct *vma; 860f729c8c9SRoss Zwisler pte_t pte, *ptep = NULL; 861f729c8c9SRoss Zwisler pmd_t *pmdp = NULL; 8624b4bb46dSJan Kara spinlock_t *ptl; 8634b4bb46dSJan Kara 8644b4bb46dSJan Kara i_mmap_lock_read(mapping); 8654b4bb46dSJan Kara vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { 866a4d1a885SJérôme Glisse unsigned long address, start, end; 8674b4bb46dSJan Kara 8684b4bb46dSJan Kara cond_resched(); 8694b4bb46dSJan Kara 8704b4bb46dSJan Kara if (!(vma->vm_flags & VM_SHARED)) 8714b4bb46dSJan Kara continue; 8724b4bb46dSJan Kara 8734b4bb46dSJan Kara address = pgoff_address(index, vma); 874a4d1a885SJérôme Glisse 875a4d1a885SJérôme Glisse /* 876a4d1a885SJérôme Glisse * Note because we provide start/end to follow_pte_pmd it will 877a4d1a885SJérôme Glisse * call mmu_notifier_invalidate_range_start() on our behalf 878a4d1a885SJérôme Glisse * before taking any lock. 879a4d1a885SJérôme Glisse */ 880a4d1a885SJérôme Glisse if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl)) 8814b4bb46dSJan Kara continue; 882f729c8c9SRoss Zwisler 8830f10851eSJérôme Glisse /* 8840f10851eSJérôme Glisse * No need to call mmu_notifier_invalidate_range() as we are 8850f10851eSJérôme Glisse * downgrading page table protection not changing it to point 8860f10851eSJérôme Glisse * to a new page. 
8870f10851eSJérôme Glisse * 888ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 8890f10851eSJérôme Glisse */ 890f729c8c9SRoss Zwisler if (pmdp) { 891f729c8c9SRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 892f729c8c9SRoss Zwisler pmd_t pmd; 893f729c8c9SRoss Zwisler 894f729c8c9SRoss Zwisler if (pfn != pmd_pfn(*pmdp)) 895f729c8c9SRoss Zwisler goto unlock_pmd; 896f6f37321SLinus Torvalds if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp)) 897f729c8c9SRoss Zwisler goto unlock_pmd; 898f729c8c9SRoss Zwisler 899f729c8c9SRoss Zwisler flush_cache_page(vma, address, pfn); 900f729c8c9SRoss Zwisler pmd = pmdp_huge_clear_flush(vma, address, pmdp); 901f729c8c9SRoss Zwisler pmd = pmd_wrprotect(pmd); 902f729c8c9SRoss Zwisler pmd = pmd_mkclean(pmd); 903f729c8c9SRoss Zwisler set_pmd_at(vma->vm_mm, address, pmdp, pmd); 904f729c8c9SRoss Zwisler unlock_pmd: 905f729c8c9SRoss Zwisler #endif 906ee190ca6SJan H. Schönherr spin_unlock(ptl); 907f729c8c9SRoss Zwisler } else { 9084b4bb46dSJan Kara if (pfn != pte_pfn(*ptep)) 909f729c8c9SRoss Zwisler goto unlock_pte; 9104b4bb46dSJan Kara if (!pte_dirty(*ptep) && !pte_write(*ptep)) 911f729c8c9SRoss Zwisler goto unlock_pte; 9124b4bb46dSJan Kara 9134b4bb46dSJan Kara flush_cache_page(vma, address, pfn); 9144b4bb46dSJan Kara pte = ptep_clear_flush(vma, address, ptep); 9154b4bb46dSJan Kara pte = pte_wrprotect(pte); 9164b4bb46dSJan Kara pte = pte_mkclean(pte); 9174b4bb46dSJan Kara set_pte_at(vma->vm_mm, address, ptep, pte); 918f729c8c9SRoss Zwisler unlock_pte: 9194b4bb46dSJan Kara pte_unmap_unlock(ptep, ptl); 920f729c8c9SRoss Zwisler } 9214b4bb46dSJan Kara 922a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); 9234b4bb46dSJan Kara } 9244b4bb46dSJan Kara i_mmap_unlock_read(mapping); 9254b4bb46dSJan Kara } 9264b4bb46dSJan Kara 9273fe0791cSDan Williams static int dax_writeback_one(struct dax_device *dax_dev, 9283fe0791cSDan Williams struct address_space *mapping, pgoff_t index, void *entry) 9299973c98eSRoss Zwisler { 930b93b0163SMatthew Wilcox struct radix_tree_root *pages = &mapping->i_pages; 9313fe0791cSDan Williams void *entry2, **slot; 9323fe0791cSDan Williams unsigned long pfn; 9333fe0791cSDan Williams long ret = 0; 934cccbce67SDan Williams size_t size; 9359973c98eSRoss Zwisler 9369973c98eSRoss Zwisler /* 937a6abc2c0SJan Kara * A page got tagged dirty in DAX mapping? Something is seriously 938a6abc2c0SJan Kara * wrong. 9399973c98eSRoss Zwisler */ 9403159f943SMatthew Wilcox if (WARN_ON(!xa_is_value(entry))) 941a6abc2c0SJan Kara return -EIO; 9429973c98eSRoss Zwisler 943b93b0163SMatthew Wilcox xa_lock_irq(pages); 944a6abc2c0SJan Kara entry2 = get_unlocked_mapping_entry(mapping, index, &slot); 945a6abc2c0SJan Kara /* Entry got punched out / reallocated? */ 9463159f943SMatthew Wilcox if (!entry2 || WARN_ON_ONCE(!xa_is_value(entry2))) 947a6abc2c0SJan Kara goto put_unlocked; 948a6abc2c0SJan Kara /* 949a6abc2c0SJan Kara * Entry got reallocated elsewhere? No need to writeback. We have to 9503fe0791cSDan Williams * compare pfns as we must not bail out due to difference in lockbit 951a6abc2c0SJan Kara * or entry type. 
952a6abc2c0SJan Kara */ 9533fe0791cSDan Williams if (dax_radix_pfn(entry2) != dax_radix_pfn(entry)) 954a6abc2c0SJan Kara goto put_unlocked; 955642261acSRoss Zwisler if (WARN_ON_ONCE(dax_is_empty_entry(entry) || 956642261acSRoss Zwisler dax_is_zero_entry(entry))) { 9579973c98eSRoss Zwisler ret = -EIO; 958a6abc2c0SJan Kara goto put_unlocked; 9599973c98eSRoss Zwisler } 9609973c98eSRoss Zwisler 961a6abc2c0SJan Kara /* Another fsync thread may have already written back this entry */ 962b93b0163SMatthew Wilcox if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)) 963a6abc2c0SJan Kara goto put_unlocked; 964a6abc2c0SJan Kara /* Lock the entry to serialize with page faults */ 965a6abc2c0SJan Kara entry = lock_slot(mapping, slot); 966a6abc2c0SJan Kara /* 967a6abc2c0SJan Kara * We can clear the tag now but we have to be careful so that concurrent 968a6abc2c0SJan Kara * dax_writeback_one() calls for the same index cannot finish before we 969a6abc2c0SJan Kara * actually flush the caches. This is achieved as the calls will look 970b93b0163SMatthew Wilcox * at the entry only under the i_pages lock and once they do that 971b93b0163SMatthew Wilcox * they will see the entry locked and wait for it to unlock. 972a6abc2c0SJan Kara */ 973b93b0163SMatthew Wilcox radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE); 974b93b0163SMatthew Wilcox xa_unlock_irq(pages); 975a6abc2c0SJan Kara 976642261acSRoss Zwisler /* 977642261acSRoss Zwisler * Even if dax_writeback_mapping_range() was given a wbc->range_start 978642261acSRoss Zwisler * in the middle of a PMD, the 'index' we are given will be aligned to 9793fe0791cSDan Williams * the start index of the PMD, as will the pfn we pull from 'entry'. 9803fe0791cSDan Williams * This allows us to flush for PMD_SIZE and not have to worry about 9813fe0791cSDan Williams * partial PMD writebacks. 982642261acSRoss Zwisler */ 9833fe0791cSDan Williams pfn = dax_radix_pfn(entry); 984cccbce67SDan Williams size = PAGE_SIZE << dax_radix_order(entry); 985cccbce67SDan Williams 9863fe0791cSDan Williams dax_mapping_entry_mkclean(mapping, index, pfn); 9873fe0791cSDan Williams dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size); 9884b4bb46dSJan Kara /* 9894b4bb46dSJan Kara * After we have flushed the cache, we can clear the dirty tag. There 9904b4bb46dSJan Kara * cannot be new dirty data in the pfn after the flush has completed as 9914b4bb46dSJan Kara * the pfn mappings are writeprotected and fault waits for mapping 9924b4bb46dSJan Kara * entry lock. 9934b4bb46dSJan Kara */ 994b93b0163SMatthew Wilcox xa_lock_irq(pages); 995b93b0163SMatthew Wilcox radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY); 996b93b0163SMatthew Wilcox xa_unlock_irq(pages); 997f9bc3a07SRoss Zwisler trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT); 99891d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, index); 9999973c98eSRoss Zwisler return ret; 10009973c98eSRoss Zwisler 1001a6abc2c0SJan Kara put_unlocked: 1002a6abc2c0SJan Kara put_unlocked_mapping_entry(mapping, index, entry2); 1003b93b0163SMatthew Wilcox xa_unlock_irq(pages); 10049973c98eSRoss Zwisler return ret; 10059973c98eSRoss Zwisler } 10069973c98eSRoss Zwisler 10079973c98eSRoss Zwisler /* 10089973c98eSRoss Zwisler * Flush the mapping to the persistent domain within the byte range of [start, 10099973c98eSRoss Zwisler * end]. This is required by data integrity operations to ensure file data is 10109973c98eSRoss Zwisler * on persistent storage prior to completion of the operation. 
10119973c98eSRoss Zwisler */ 10127f6d5b52SRoss Zwisler int dax_writeback_mapping_range(struct address_space *mapping, 10137f6d5b52SRoss Zwisler struct block_device *bdev, struct writeback_control *wbc) 10149973c98eSRoss Zwisler { 10159973c98eSRoss Zwisler struct inode *inode = mapping->host; 1016642261acSRoss Zwisler pgoff_t start_index, end_index; 10179973c98eSRoss Zwisler pgoff_t indices[PAGEVEC_SIZE]; 1018cccbce67SDan Williams struct dax_device *dax_dev; 10199973c98eSRoss Zwisler struct pagevec pvec; 10209973c98eSRoss Zwisler bool done = false; 10219973c98eSRoss Zwisler int i, ret = 0; 10229973c98eSRoss Zwisler 10239973c98eSRoss Zwisler if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) 10249973c98eSRoss Zwisler return -EIO; 10259973c98eSRoss Zwisler 10267f6d5b52SRoss Zwisler if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) 10277f6d5b52SRoss Zwisler return 0; 10287f6d5b52SRoss Zwisler 1029cccbce67SDan Williams dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 1030cccbce67SDan Williams if (!dax_dev) 1031cccbce67SDan Williams return -EIO; 1032cccbce67SDan Williams 103309cbfeafSKirill A. Shutemov start_index = wbc->range_start >> PAGE_SHIFT; 103409cbfeafSKirill A. Shutemov end_index = wbc->range_end >> PAGE_SHIFT; 10359973c98eSRoss Zwisler 1036d14a3f48SRoss Zwisler trace_dax_writeback_range(inode, start_index, end_index); 1037d14a3f48SRoss Zwisler 10389973c98eSRoss Zwisler tag_pages_for_writeback(mapping, start_index, end_index); 10399973c98eSRoss Zwisler 104086679820SMel Gorman pagevec_init(&pvec); 10419973c98eSRoss Zwisler while (!done) { 10429973c98eSRoss Zwisler pvec.nr = find_get_entries_tag(mapping, start_index, 10439973c98eSRoss Zwisler PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE, 10449973c98eSRoss Zwisler pvec.pages, indices); 10459973c98eSRoss Zwisler 10469973c98eSRoss Zwisler if (pvec.nr == 0) 10479973c98eSRoss Zwisler break; 10489973c98eSRoss Zwisler 10499973c98eSRoss Zwisler for (i = 0; i < pvec.nr; i++) { 10509973c98eSRoss Zwisler if (indices[i] > end_index) { 10519973c98eSRoss Zwisler done = true; 10529973c98eSRoss Zwisler break; 10539973c98eSRoss Zwisler } 10549973c98eSRoss Zwisler 10553fe0791cSDan Williams ret = dax_writeback_one(dax_dev, mapping, indices[i], 10563fe0791cSDan Williams pvec.pages[i]); 1057819ec6b9SJeff Layton if (ret < 0) { 1058819ec6b9SJeff Layton mapping_set_error(mapping, ret); 1059d14a3f48SRoss Zwisler goto out; 1060d14a3f48SRoss Zwisler } 1061d14a3f48SRoss Zwisler } 10621eb643d0SJan Kara start_index = indices[pvec.nr - 1] + 1; 1063d14a3f48SRoss Zwisler } 1064d14a3f48SRoss Zwisler out: 1065cccbce67SDan Williams put_dax(dax_dev); 1066d14a3f48SRoss Zwisler trace_dax_writeback_range_done(inode, start_index, end_index); 1067d14a3f48SRoss Zwisler return (ret < 0 ? 
ret : 0); 10689973c98eSRoss Zwisler } 10699973c98eSRoss Zwisler EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); 10709973c98eSRoss Zwisler 107131a6f1a6SJan Kara static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) 1072f7ca90b1SMatthew Wilcox { 1073a3841f94SLinus Torvalds return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9; 107431a6f1a6SJan Kara } 1075f7ca90b1SMatthew Wilcox 10765e161e40SJan Kara static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size, 10775e161e40SJan Kara pfn_t *pfnp) 10785e161e40SJan Kara { 10795e161e40SJan Kara const sector_t sector = dax_iomap_sector(iomap, pos); 10805e161e40SJan Kara pgoff_t pgoff; 10815e161e40SJan Kara int id, rc; 10825e161e40SJan Kara long length; 10835e161e40SJan Kara 10845e161e40SJan Kara rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff); 1085cccbce67SDan Williams if (rc) 1086cccbce67SDan Williams return rc; 1087cccbce67SDan Williams id = dax_read_lock(); 10885e161e40SJan Kara length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), 108986ed913bSHuaisheng Ye NULL, pfnp); 10905e161e40SJan Kara if (length < 0) { 10915e161e40SJan Kara rc = length; 10925e161e40SJan Kara goto out; 10935e161e40SJan Kara } 10945e161e40SJan Kara rc = -EINVAL; 10955e161e40SJan Kara if (PFN_PHYS(length) < size) 10965e161e40SJan Kara goto out; 10975e161e40SJan Kara if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1)) 10985e161e40SJan Kara goto out; 10995e161e40SJan Kara /* For larger pages we need devmap */ 11005e161e40SJan Kara if (length > 1 && !pfn_t_devmap(*pfnp)) 11015e161e40SJan Kara goto out; 11025e161e40SJan Kara rc = 0; 11035e161e40SJan Kara out: 1104cccbce67SDan Williams dax_read_unlock(id); 1105cccbce67SDan Williams return rc; 1106cccbce67SDan Williams } 1107f7ca90b1SMatthew Wilcox 11082f89dc12SJan Kara /* 110991d25ba8SRoss Zwisler * The user has performed a load from a hole in the file. Allocating a new 111091d25ba8SRoss Zwisler * page in the file would cause excessive storage usage for workloads with 111191d25ba8SRoss Zwisler * sparse files. Instead we insert a read-only mapping of the 4k zero page. 111291d25ba8SRoss Zwisler * If this page is ever written to we will re-fault and change the mapping to 111391d25ba8SRoss Zwisler * point to real DAX storage instead. 
11142f89dc12SJan Kara */ 1115ab77dab4SSouptick Joarder static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry, 1116e30331ffSRoss Zwisler struct vm_fault *vmf) 1117e30331ffSRoss Zwisler { 1118e30331ffSRoss Zwisler struct inode *inode = mapping->host; 111991d25ba8SRoss Zwisler unsigned long vaddr = vmf->address; 1120b90ca5ccSMatthew Wilcox pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); 1121b90ca5ccSMatthew Wilcox vm_fault_t ret; 1122e30331ffSRoss Zwisler 11233159f943SMatthew Wilcox dax_insert_mapping_entry(mapping, vmf, entry, pfn, 11243159f943SMatthew Wilcox DAX_ZERO_PAGE, false); 11253159f943SMatthew Wilcox 1126ab77dab4SSouptick Joarder ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); 1127e30331ffSRoss Zwisler trace_dax_load_hole(inode, vmf, ret); 1128e30331ffSRoss Zwisler return ret; 1129e30331ffSRoss Zwisler } 1130e30331ffSRoss Zwisler 11314b0228faSVishal Verma static bool dax_range_is_aligned(struct block_device *bdev, 11324b0228faSVishal Verma unsigned int offset, unsigned int length) 11334b0228faSVishal Verma { 11344b0228faSVishal Verma unsigned short sector_size = bdev_logical_block_size(bdev); 11354b0228faSVishal Verma 11364b0228faSVishal Verma if (!IS_ALIGNED(offset, sector_size)) 11374b0228faSVishal Verma return false; 11384b0228faSVishal Verma if (!IS_ALIGNED(length, sector_size)) 11394b0228faSVishal Verma return false; 11404b0228faSVishal Verma 11414b0228faSVishal Verma return true; 11424b0228faSVishal Verma } 11434b0228faSVishal Verma 1144cccbce67SDan Williams int __dax_zero_page_range(struct block_device *bdev, 1145cccbce67SDan Williams struct dax_device *dax_dev, sector_t sector, 1146cccbce67SDan Williams unsigned int offset, unsigned int size) 1147679c8bd3SChristoph Hellwig { 1148cccbce67SDan Williams if (dax_range_is_aligned(bdev, offset, size)) { 1149cccbce67SDan Williams sector_t start_sector = sector + (offset >> 9); 11504b0228faSVishal Verma 11514b0228faSVishal Verma return blkdev_issue_zeroout(bdev, start_sector, 115253ef7d0eSLinus Torvalds size >> 9, GFP_NOFS, 0); 11534b0228faSVishal Verma } else { 1154cccbce67SDan Williams pgoff_t pgoff; 1155cccbce67SDan Williams long rc, id; 1156cccbce67SDan Williams void *kaddr; 1157cccbce67SDan Williams 1158e84b83b9SDan Williams rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff); 1159cccbce67SDan Williams if (rc) 1160cccbce67SDan Williams return rc; 1161cccbce67SDan Williams 1162cccbce67SDan Williams id = dax_read_lock(); 116386ed913bSHuaisheng Ye rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL); 1164cccbce67SDan Williams if (rc < 0) { 1165cccbce67SDan Williams dax_read_unlock(id); 1166cccbce67SDan Williams return rc; 1167cccbce67SDan Williams } 116881f55870SDan Williams memset(kaddr + offset, 0, size); 1169c3ca015fSMikulas Patocka dax_flush(dax_dev, kaddr + offset, size); 1170cccbce67SDan Williams dax_read_unlock(id); 11714b0228faSVishal Verma } 1172679c8bd3SChristoph Hellwig return 0; 1173679c8bd3SChristoph Hellwig } 1174679c8bd3SChristoph Hellwig EXPORT_SYMBOL_GPL(__dax_zero_page_range); 1175679c8bd3SChristoph Hellwig 1176a254e568SChristoph Hellwig static loff_t 117711c59c92SRoss Zwisler dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, 1178a254e568SChristoph Hellwig struct iomap *iomap) 1179a254e568SChristoph Hellwig { 1180cccbce67SDan Williams struct block_device *bdev = iomap->bdev; 1181cccbce67SDan Williams struct dax_device *dax_dev = iomap->dax_dev; 1182a254e568SChristoph Hellwig struct iov_iter *iter = data; 1183a254e568SChristoph Hellwig loff_t end = pos + length, done 
= 0; 1184a254e568SChristoph Hellwig ssize_t ret = 0; 1185a77d4786SDan Williams size_t xfer; 1186cccbce67SDan Williams int id; 1187a254e568SChristoph Hellwig 1188a254e568SChristoph Hellwig if (iov_iter_rw(iter) == READ) { 1189a254e568SChristoph Hellwig end = min(end, i_size_read(inode)); 1190a254e568SChristoph Hellwig if (pos >= end) 1191a254e568SChristoph Hellwig return 0; 1192a254e568SChristoph Hellwig 1193a254e568SChristoph Hellwig if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) 1194a254e568SChristoph Hellwig return iov_iter_zero(min(length, end - pos), iter); 1195a254e568SChristoph Hellwig } 1196a254e568SChristoph Hellwig 1197a254e568SChristoph Hellwig if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) 1198a254e568SChristoph Hellwig return -EIO; 1199a254e568SChristoph Hellwig 1200e3fce68cSJan Kara /* 1201e3fce68cSJan Kara * Write can allocate block for an area which has a hole page mapped 1202e3fce68cSJan Kara * into page tables. We have to tear down these mappings so that data 1203e3fce68cSJan Kara * written by write(2) is visible in mmap. 1204e3fce68cSJan Kara */ 1205cd656375SJan Kara if (iomap->flags & IOMAP_F_NEW) { 1206e3fce68cSJan Kara invalidate_inode_pages2_range(inode->i_mapping, 1207e3fce68cSJan Kara pos >> PAGE_SHIFT, 1208e3fce68cSJan Kara (end - 1) >> PAGE_SHIFT); 1209e3fce68cSJan Kara } 1210e3fce68cSJan Kara 1211cccbce67SDan Williams id = dax_read_lock(); 1212a254e568SChristoph Hellwig while (pos < end) { 1213a254e568SChristoph Hellwig unsigned offset = pos & (PAGE_SIZE - 1); 1214cccbce67SDan Williams const size_t size = ALIGN(length + offset, PAGE_SIZE); 1215cccbce67SDan Williams const sector_t sector = dax_iomap_sector(iomap, pos); 1216a254e568SChristoph Hellwig ssize_t map_len; 1217cccbce67SDan Williams pgoff_t pgoff; 1218cccbce67SDan Williams void *kaddr; 1219a254e568SChristoph Hellwig 1220d1908f52SMichal Hocko if (fatal_signal_pending(current)) { 1221d1908f52SMichal Hocko ret = -EINTR; 1222d1908f52SMichal Hocko break; 1223d1908f52SMichal Hocko } 1224d1908f52SMichal Hocko 1225cccbce67SDan Williams ret = bdev_dax_pgoff(bdev, sector, size, &pgoff); 1226cccbce67SDan Williams if (ret) 1227cccbce67SDan Williams break; 1228cccbce67SDan Williams 1229cccbce67SDan Williams map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), 123086ed913bSHuaisheng Ye &kaddr, NULL); 1231a254e568SChristoph Hellwig if (map_len < 0) { 1232a254e568SChristoph Hellwig ret = map_len; 1233a254e568SChristoph Hellwig break; 1234a254e568SChristoph Hellwig } 1235a254e568SChristoph Hellwig 1236cccbce67SDan Williams map_len = PFN_PHYS(map_len); 1237cccbce67SDan Williams kaddr += offset; 1238a254e568SChristoph Hellwig map_len -= offset; 1239a254e568SChristoph Hellwig if (map_len > end - pos) 1240a254e568SChristoph Hellwig map_len = end - pos; 1241a254e568SChristoph Hellwig 1242a2e050f5SRoss Zwisler /* 1243a2e050f5SRoss Zwisler * The userspace address for the memory copy has already been 1244a2e050f5SRoss Zwisler * validated via access_ok() in either vfs_read() or 1245a2e050f5SRoss Zwisler * vfs_write(), depending on which operation we are doing. 
1246a2e050f5SRoss Zwisler */ 1247a254e568SChristoph Hellwig if (iov_iter_rw(iter) == WRITE) 1248a77d4786SDan Williams xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, 1249fec53774SDan Williams map_len, iter); 1250a254e568SChristoph Hellwig else 1251a77d4786SDan Williams xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr, 1252b3a9a0c3SDan Williams map_len, iter); 1253a254e568SChristoph Hellwig 1254a77d4786SDan Williams pos += xfer; 1255a77d4786SDan Williams length -= xfer; 1256a77d4786SDan Williams done += xfer; 1257a77d4786SDan Williams 1258a77d4786SDan Williams if (xfer == 0) 1259a77d4786SDan Williams ret = -EFAULT; 1260a77d4786SDan Williams if (xfer < map_len) 1261a77d4786SDan Williams break; 1262a254e568SChristoph Hellwig } 1263cccbce67SDan Williams dax_read_unlock(id); 1264a254e568SChristoph Hellwig 1265a254e568SChristoph Hellwig return done ? done : ret; 1266a254e568SChristoph Hellwig } 1267a254e568SChristoph Hellwig 1268a254e568SChristoph Hellwig /** 126911c59c92SRoss Zwisler * dax_iomap_rw - Perform I/O to a DAX file 1270a254e568SChristoph Hellwig * @iocb: The control block for this I/O 1271a254e568SChristoph Hellwig * @iter: The addresses to do I/O from or to 1272a254e568SChristoph Hellwig * @ops: iomap ops passed from the file system 1273a254e568SChristoph Hellwig * 1274a254e568SChristoph Hellwig * This function performs read and write operations to directly mapped 1275a254e568SChristoph Hellwig * persistent memory. The caller needs to take care of read/write exclusion 1276a254e568SChristoph Hellwig * and evicting any page cache pages in the region under I/O. 1277a254e568SChristoph Hellwig */ 1278a254e568SChristoph Hellwig ssize_t 127911c59c92SRoss Zwisler dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, 12808ff6daa1SChristoph Hellwig const struct iomap_ops *ops) 1281a254e568SChristoph Hellwig { 1282a254e568SChristoph Hellwig struct address_space *mapping = iocb->ki_filp->f_mapping; 1283a254e568SChristoph Hellwig struct inode *inode = mapping->host; 1284a254e568SChristoph Hellwig loff_t pos = iocb->ki_pos, ret = 0, done = 0; 1285a254e568SChristoph Hellwig unsigned flags = 0; 1286a254e568SChristoph Hellwig 1287168316dbSChristoph Hellwig if (iov_iter_rw(iter) == WRITE) { 1288168316dbSChristoph Hellwig lockdep_assert_held_exclusive(&inode->i_rwsem); 1289a254e568SChristoph Hellwig flags |= IOMAP_WRITE; 1290168316dbSChristoph Hellwig } else { 1291168316dbSChristoph Hellwig lockdep_assert_held(&inode->i_rwsem); 1292168316dbSChristoph Hellwig } 1293a254e568SChristoph Hellwig 1294a254e568SChristoph Hellwig while (iov_iter_count(iter)) { 1295a254e568SChristoph Hellwig ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, 129611c59c92SRoss Zwisler iter, dax_iomap_actor); 1297a254e568SChristoph Hellwig if (ret <= 0) 1298a254e568SChristoph Hellwig break; 1299a254e568SChristoph Hellwig pos += ret; 1300a254e568SChristoph Hellwig done += ret; 1301a254e568SChristoph Hellwig } 1302a254e568SChristoph Hellwig 1303a254e568SChristoph Hellwig iocb->ki_pos += done; 1304a254e568SChristoph Hellwig return done ?
done : ret; 1305a254e568SChristoph Hellwig } 130611c59c92SRoss Zwisler EXPORT_SYMBOL_GPL(dax_iomap_rw); 1307a7d73fe6SChristoph Hellwig 1308ab77dab4SSouptick Joarder static vm_fault_t dax_fault_return(int error) 13099f141d6eSJan Kara { 13109f141d6eSJan Kara if (error == 0) 13119f141d6eSJan Kara return VM_FAULT_NOPAGE; 13129f141d6eSJan Kara if (error == -ENOMEM) 13139f141d6eSJan Kara return VM_FAULT_OOM; 13149f141d6eSJan Kara return VM_FAULT_SIGBUS; 13159f141d6eSJan Kara } 13169f141d6eSJan Kara 1317aaa422c4SDan Williams /* 1318aaa422c4SDan Williams * MAP_SYNC on a dax mapping guarantees dirty metadata is 1319aaa422c4SDan Williams * flushed on write-faults (non-cow), but not read-faults. 1320aaa422c4SDan Williams */ 1321aaa422c4SDan Williams static bool dax_fault_is_synchronous(unsigned long flags, 1322aaa422c4SDan Williams struct vm_area_struct *vma, struct iomap *iomap) 1323aaa422c4SDan Williams { 1324aaa422c4SDan Williams return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) 1325aaa422c4SDan Williams && (iomap->flags & IOMAP_F_DIRTY); 1326aaa422c4SDan Williams } 1327aaa422c4SDan Williams 1328ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, 1329c0b24625SJan Kara int *iomap_errp, const struct iomap_ops *ops) 1330a7d73fe6SChristoph Hellwig { 1331a0987ad5SJan Kara struct vm_area_struct *vma = vmf->vma; 1332a0987ad5SJan Kara struct address_space *mapping = vma->vm_file->f_mapping; 1333a7d73fe6SChristoph Hellwig struct inode *inode = mapping->host; 13341a29d85eSJan Kara unsigned long vaddr = vmf->address; 1335a7d73fe6SChristoph Hellwig loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; 1336a7d73fe6SChristoph Hellwig struct iomap iomap = { 0 }; 13379484ab1bSJan Kara unsigned flags = IOMAP_FAULT; 1338a7d73fe6SChristoph Hellwig int error, major = 0; 1339d2c43ef1SJan Kara bool write = vmf->flags & FAULT_FLAG_WRITE; 1340caa51d26SJan Kara bool sync; 1341ab77dab4SSouptick Joarder vm_fault_t ret = 0; 1342a7d73fe6SChristoph Hellwig void *entry; 13431b5a1cb2SJan Kara pfn_t pfn; 1344a7d73fe6SChristoph Hellwig 1345ab77dab4SSouptick Joarder trace_dax_pte_fault(inode, vmf, ret); 1346a7d73fe6SChristoph Hellwig /* 1347a7d73fe6SChristoph Hellwig * Check whether offset isn't beyond end of file now. Caller is supposed 1348a7d73fe6SChristoph Hellwig * to hold locks serializing us with truncate / punch hole so this is 1349a7d73fe6SChristoph Hellwig * a reliable test. 1350a7d73fe6SChristoph Hellwig */ 1351a9c42b33SRoss Zwisler if (pos >= i_size_read(inode)) { 1352ab77dab4SSouptick Joarder ret = VM_FAULT_SIGBUS; 1353a9c42b33SRoss Zwisler goto out; 1354a9c42b33SRoss Zwisler } 1355a7d73fe6SChristoph Hellwig 1356d2c43ef1SJan Kara if (write && !vmf->cow_page) 1357a7d73fe6SChristoph Hellwig flags |= IOMAP_WRITE; 1358a7d73fe6SChristoph Hellwig 135913e451fdSJan Kara entry = grab_mapping_entry(mapping, vmf->pgoff, 0); 136013e451fdSJan Kara if (IS_ERR(entry)) { 1361ab77dab4SSouptick Joarder ret = dax_fault_return(PTR_ERR(entry)); 136213e451fdSJan Kara goto out; 136313e451fdSJan Kara } 136413e451fdSJan Kara 1365a7d73fe6SChristoph Hellwig /* 1366e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1367e2093926SRoss Zwisler * mappings, that we have raced with a PMD fault that overlaps with 1368e2093926SRoss Zwisler * the PTE we need to set up. If so just return and the fault will be 1369e2093926SRoss Zwisler * retried. 
1370e2093926SRoss Zwisler */ 1371e2093926SRoss Zwisler if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { 1372ab77dab4SSouptick Joarder ret = VM_FAULT_NOPAGE; 1373e2093926SRoss Zwisler goto unlock_entry; 1374e2093926SRoss Zwisler } 1375e2093926SRoss Zwisler 1376e2093926SRoss Zwisler /* 1377a7d73fe6SChristoph Hellwig * Note that we don't bother to use iomap_apply here: DAX requires 1378a7d73fe6SChristoph Hellwig * the file system block size to be equal to the page size, which means 1379a7d73fe6SChristoph Hellwig * that we never have to deal with more than a single extent here. 1380a7d73fe6SChristoph Hellwig */ 1381a7d73fe6SChristoph Hellwig error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap); 1382c0b24625SJan Kara if (iomap_errp) 1383c0b24625SJan Kara *iomap_errp = error; 1384a9c42b33SRoss Zwisler if (error) { 1385ab77dab4SSouptick Joarder ret = dax_fault_return(error); 138613e451fdSJan Kara goto unlock_entry; 1387a9c42b33SRoss Zwisler } 1388a7d73fe6SChristoph Hellwig if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { 138913e451fdSJan Kara error = -EIO; /* fs corruption? */ 139013e451fdSJan Kara goto error_finish_iomap; 1391a7d73fe6SChristoph Hellwig } 1392a7d73fe6SChristoph Hellwig 1393a7d73fe6SChristoph Hellwig if (vmf->cow_page) { 139431a6f1a6SJan Kara sector_t sector = dax_iomap_sector(&iomap, pos); 139531a6f1a6SJan Kara 1396a7d73fe6SChristoph Hellwig switch (iomap.type) { 1397a7d73fe6SChristoph Hellwig case IOMAP_HOLE: 1398a7d73fe6SChristoph Hellwig case IOMAP_UNWRITTEN: 1399a7d73fe6SChristoph Hellwig clear_user_highpage(vmf->cow_page, vaddr); 1400a7d73fe6SChristoph Hellwig break; 1401a7d73fe6SChristoph Hellwig case IOMAP_MAPPED: 1402cccbce67SDan Williams error = copy_user_dax(iomap.bdev, iomap.dax_dev, 1403cccbce67SDan Williams sector, PAGE_SIZE, vmf->cow_page, vaddr); 1404a7d73fe6SChristoph Hellwig break; 1405a7d73fe6SChristoph Hellwig default: 1406a7d73fe6SChristoph Hellwig WARN_ON_ONCE(1); 1407a7d73fe6SChristoph Hellwig error = -EIO; 1408a7d73fe6SChristoph Hellwig break; 1409a7d73fe6SChristoph Hellwig } 1410a7d73fe6SChristoph Hellwig 1411a7d73fe6SChristoph Hellwig if (error) 141213e451fdSJan Kara goto error_finish_iomap; 1413b1aa812bSJan Kara 1414b1aa812bSJan Kara __SetPageUptodate(vmf->cow_page); 1415ab77dab4SSouptick Joarder ret = finish_fault(vmf); 1416ab77dab4SSouptick Joarder if (!ret) 1417ab77dab4SSouptick Joarder ret = VM_FAULT_DONE_COW; 141813e451fdSJan Kara goto finish_iomap; 1419a7d73fe6SChristoph Hellwig } 1420a7d73fe6SChristoph Hellwig 1421aaa422c4SDan Williams sync = dax_fault_is_synchronous(flags, vma, &iomap); 1422caa51d26SJan Kara 1423a7d73fe6SChristoph Hellwig switch (iomap.type) { 1424a7d73fe6SChristoph Hellwig case IOMAP_MAPPED: 1425a7d73fe6SChristoph Hellwig if (iomap.flags & IOMAP_F_NEW) { 1426a7d73fe6SChristoph Hellwig count_vm_event(PGMAJFAULT); 1427a0987ad5SJan Kara count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); 1428a7d73fe6SChristoph Hellwig major = VM_FAULT_MAJOR; 1429a7d73fe6SChristoph Hellwig } 14301b5a1cb2SJan Kara error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn); 14311b5a1cb2SJan Kara if (error < 0) 14321b5a1cb2SJan Kara goto error_finish_iomap; 14331b5a1cb2SJan Kara 14343fe0791cSDan Williams entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn, 1435caa51d26SJan Kara 0, write && !sync); 14361b5a1cb2SJan Kara 1437caa51d26SJan Kara /* 1438caa51d26SJan Kara * If we are doing synchronous page fault and inode needs fsync, 1439caa51d26SJan Kara * we can insert PTE into page tables only after that happens.
1440caa51d26SJan Kara * Skip insertion for now and return the pfn so that caller can 1441caa51d26SJan Kara * insert it after fsync is done. 1442caa51d26SJan Kara */ 1443caa51d26SJan Kara if (sync) { 1444caa51d26SJan Kara if (WARN_ON_ONCE(!pfnp)) { 1445caa51d26SJan Kara error = -EIO; 1446caa51d26SJan Kara goto error_finish_iomap; 1447caa51d26SJan Kara } 1448caa51d26SJan Kara *pfnp = pfn; 1449ab77dab4SSouptick Joarder ret = VM_FAULT_NEEDDSYNC | major; 1450caa51d26SJan Kara goto finish_iomap; 1451caa51d26SJan Kara } 14521b5a1cb2SJan Kara trace_dax_insert_mapping(inode, vmf, entry); 14531b5a1cb2SJan Kara if (write) 1454ab77dab4SSouptick Joarder ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn); 14551b5a1cb2SJan Kara else 1456ab77dab4SSouptick Joarder ret = vmf_insert_mixed(vma, vaddr, pfn); 14571b5a1cb2SJan Kara 1458ab77dab4SSouptick Joarder goto finish_iomap; 1459a7d73fe6SChristoph Hellwig case IOMAP_UNWRITTEN: 1460a7d73fe6SChristoph Hellwig case IOMAP_HOLE: 1461d2c43ef1SJan Kara if (!write) { 1462ab77dab4SSouptick Joarder ret = dax_load_hole(mapping, entry, vmf); 146313e451fdSJan Kara goto finish_iomap; 14641550290bSRoss Zwisler } 1465a7d73fe6SChristoph Hellwig /*FALLTHRU*/ 1466a7d73fe6SChristoph Hellwig default: 1467a7d73fe6SChristoph Hellwig WARN_ON_ONCE(1); 1468a7d73fe6SChristoph Hellwig error = -EIO; 1469a7d73fe6SChristoph Hellwig break; 1470a7d73fe6SChristoph Hellwig } 1471a7d73fe6SChristoph Hellwig 147213e451fdSJan Kara error_finish_iomap: 1473ab77dab4SSouptick Joarder ret = dax_fault_return(error); 14749f141d6eSJan Kara finish_iomap: 14759f141d6eSJan Kara if (ops->iomap_end) { 14769f141d6eSJan Kara int copied = PAGE_SIZE; 14779f141d6eSJan Kara 1478ab77dab4SSouptick Joarder if (ret & VM_FAULT_ERROR) 14799f141d6eSJan Kara copied = 0; 14809f141d6eSJan Kara /* 14819f141d6eSJan Kara * The fault is done by now and there's no way back (other 14829f141d6eSJan Kara * thread may be already happily using PTE we have installed). 14839f141d6eSJan Kara * Just ignore error from ->iomap_end since we cannot do much 14849f141d6eSJan Kara * with it. 
14859f141d6eSJan Kara */ 14869f141d6eSJan Kara ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap); 14871550290bSRoss Zwisler } 148813e451fdSJan Kara unlock_entry: 148991d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, vmf->pgoff); 1490a9c42b33SRoss Zwisler out: 1491ab77dab4SSouptick Joarder trace_dax_pte_fault_done(inode, vmf, ret); 1492ab77dab4SSouptick Joarder return ret | major; 1493a7d73fe6SChristoph Hellwig } 1494642261acSRoss Zwisler 1495642261acSRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 1496ab77dab4SSouptick Joarder static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap, 149791d25ba8SRoss Zwisler void *entry) 1498642261acSRoss Zwisler { 1499f4200391SDave Jiang struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1500f4200391SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1501653b2ea3SRoss Zwisler struct inode *inode = mapping->host; 1502642261acSRoss Zwisler struct page *zero_page; 1503653b2ea3SRoss Zwisler void *ret = NULL; 1504642261acSRoss Zwisler spinlock_t *ptl; 1505642261acSRoss Zwisler pmd_t pmd_entry; 15063fe0791cSDan Williams pfn_t pfn; 1507642261acSRoss Zwisler 1508f4200391SDave Jiang zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); 1509642261acSRoss Zwisler 1510642261acSRoss Zwisler if (unlikely(!zero_page)) 1511653b2ea3SRoss Zwisler goto fallback; 1512642261acSRoss Zwisler 15133fe0791cSDan Williams pfn = page_to_pfn_t(zero_page); 15143fe0791cSDan Williams ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn, 15153159f943SMatthew Wilcox DAX_PMD | DAX_ZERO_PAGE, false); 1516642261acSRoss Zwisler 1517f4200391SDave Jiang ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1518f4200391SDave Jiang if (!pmd_none(*(vmf->pmd))) { 1519642261acSRoss Zwisler spin_unlock(ptl); 1520653b2ea3SRoss Zwisler goto fallback; 1521642261acSRoss Zwisler } 1522642261acSRoss Zwisler 1523f4200391SDave Jiang pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); 1524642261acSRoss Zwisler pmd_entry = pmd_mkhuge(pmd_entry); 1525f4200391SDave Jiang set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); 1526642261acSRoss Zwisler spin_unlock(ptl); 1527f4200391SDave Jiang trace_dax_pmd_load_hole(inode, vmf, zero_page, ret); 1528642261acSRoss Zwisler return VM_FAULT_NOPAGE; 1529653b2ea3SRoss Zwisler 1530653b2ea3SRoss Zwisler fallback: 1531f4200391SDave Jiang trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret); 1532642261acSRoss Zwisler return VM_FAULT_FALLBACK; 1533642261acSRoss Zwisler } 1534642261acSRoss Zwisler 1535ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 1536a2d58167SDave Jiang const struct iomap_ops *ops) 1537642261acSRoss Zwisler { 1538f4200391SDave Jiang struct vm_area_struct *vma = vmf->vma; 1539642261acSRoss Zwisler struct address_space *mapping = vma->vm_file->f_mapping; 1540d8a849e1SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1541d8a849e1SDave Jiang bool write = vmf->flags & FAULT_FLAG_WRITE; 1542caa51d26SJan Kara bool sync; 15439484ab1bSJan Kara unsigned int iomap_flags = (write ? 
IOMAP_WRITE : 0) | IOMAP_FAULT; 1544642261acSRoss Zwisler struct inode *inode = mapping->host; 1545ab77dab4SSouptick Joarder vm_fault_t result = VM_FAULT_FALLBACK; 1546642261acSRoss Zwisler struct iomap iomap = { 0 }; 1547642261acSRoss Zwisler pgoff_t max_pgoff, pgoff; 1548642261acSRoss Zwisler void *entry; 1549642261acSRoss Zwisler loff_t pos; 1550642261acSRoss Zwisler int error; 1551302a5e31SJan Kara pfn_t pfn; 1552642261acSRoss Zwisler 1553282a8e03SRoss Zwisler /* 1554282a8e03SRoss Zwisler * Check whether offset isn't beyond end of file now. Caller is 1555282a8e03SRoss Zwisler * supposed to hold locks serializing us with truncate / punch hole so 1556282a8e03SRoss Zwisler * this is a reliable test. 1557282a8e03SRoss Zwisler */ 1558282a8e03SRoss Zwisler pgoff = linear_page_index(vma, pmd_addr); 1559957ac8c4SJeff Moyer max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 1560282a8e03SRoss Zwisler 1561f4200391SDave Jiang trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); 1562282a8e03SRoss Zwisler 1563fffa281bSRoss Zwisler /* 1564fffa281bSRoss Zwisler * Make sure that the faulting address's PMD offset (color) matches 1565fffa281bSRoss Zwisler * the PMD offset from the start of the file. This is necessary so 1566fffa281bSRoss Zwisler * that a PMD range in the page table overlaps exactly with a PMD 1567fffa281bSRoss Zwisler * range in the radix tree. 1568fffa281bSRoss Zwisler */ 1569fffa281bSRoss Zwisler if ((vmf->pgoff & PG_PMD_COLOUR) != 1570fffa281bSRoss Zwisler ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) 1571fffa281bSRoss Zwisler goto fallback; 1572fffa281bSRoss Zwisler 1573642261acSRoss Zwisler /* Fall back to PTEs if we're going to COW */ 1574642261acSRoss Zwisler if (write && !(vma->vm_flags & VM_SHARED)) 1575642261acSRoss Zwisler goto fallback; 1576642261acSRoss Zwisler 1577642261acSRoss Zwisler /* If the PMD would extend outside the VMA */ 1578642261acSRoss Zwisler if (pmd_addr < vma->vm_start) 1579642261acSRoss Zwisler goto fallback; 1580642261acSRoss Zwisler if ((pmd_addr + PMD_SIZE) > vma->vm_end) 1581642261acSRoss Zwisler goto fallback; 1582642261acSRoss Zwisler 1583957ac8c4SJeff Moyer if (pgoff >= max_pgoff) { 1584282a8e03SRoss Zwisler result = VM_FAULT_SIGBUS; 1585282a8e03SRoss Zwisler goto out; 1586282a8e03SRoss Zwisler } 1587642261acSRoss Zwisler 1588642261acSRoss Zwisler /* If the PMD would extend beyond the file size */ 1589957ac8c4SJeff Moyer if ((pgoff | PG_PMD_COLOUR) >= max_pgoff) 1590642261acSRoss Zwisler goto fallback; 1591642261acSRoss Zwisler 1592642261acSRoss Zwisler /* 159391d25ba8SRoss Zwisler * grab_mapping_entry() will make sure we get a 2MiB empty entry, a 159491d25ba8SRoss Zwisler * 2MiB zero page entry or a DAX PMD. If it can't (because a 4k page 159591d25ba8SRoss Zwisler * is already in the tree, for instance), it will return -EEXIST and 159691d25ba8SRoss Zwisler * we just fall back to 4k entries. 15979f141d6eSJan Kara */ 15983159f943SMatthew Wilcox entry = grab_mapping_entry(mapping, pgoff, DAX_PMD); 15999f141d6eSJan Kara if (IS_ERR(entry)) 1600876f2946SRoss Zwisler goto fallback; 1601876f2946SRoss Zwisler 1602876f2946SRoss Zwisler /* 1603e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1604e2093926SRoss Zwisler * mappings, that we have raced with a PTE fault that overlaps with 1605e2093926SRoss Zwisler * the PMD we need to set up. If so just return and the fault will be 1606e2093926SRoss Zwisler * retried. 
1607e2093926SRoss Zwisler */ 1608e2093926SRoss Zwisler if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && 1609e2093926SRoss Zwisler !pmd_devmap(*vmf->pmd)) { 1610e2093926SRoss Zwisler result = 0; 1611e2093926SRoss Zwisler goto unlock_entry; 1612e2093926SRoss Zwisler } 1613e2093926SRoss Zwisler 1614e2093926SRoss Zwisler /* 1615876f2946SRoss Zwisler * Note that we don't use iomap_apply here. We aren't doing I/O, only 1616876f2946SRoss Zwisler * setting up a mapping, so really we're using iomap_begin() as a way 1617876f2946SRoss Zwisler * to look up our filesystem block. 1618876f2946SRoss Zwisler */ 1619876f2946SRoss Zwisler pos = (loff_t)pgoff << PAGE_SHIFT; 1620876f2946SRoss Zwisler error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap); 1621876f2946SRoss Zwisler if (error) 1622876f2946SRoss Zwisler goto unlock_entry; 1623876f2946SRoss Zwisler 1624876f2946SRoss Zwisler if (iomap.offset + iomap.length < pos + PMD_SIZE) 16259f141d6eSJan Kara goto finish_iomap; 16269f141d6eSJan Kara 1627aaa422c4SDan Williams sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap); 1628caa51d26SJan Kara 1629642261acSRoss Zwisler switch (iomap.type) { 1630642261acSRoss Zwisler case IOMAP_MAPPED: 1631302a5e31SJan Kara error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn); 1632302a5e31SJan Kara if (error < 0) 1633302a5e31SJan Kara goto finish_iomap; 1634302a5e31SJan Kara 16353fe0791cSDan Williams entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn, 16363159f943SMatthew Wilcox DAX_PMD, write && !sync); 1637302a5e31SJan Kara 1638caa51d26SJan Kara /* 1639caa51d26SJan Kara * If we are doing synchronous page fault and inode needs fsync, 1640caa51d26SJan Kara * we can insert PMD into page tables only after that happens. 1641caa51d26SJan Kara * Skip insertion for now and return the pfn so that caller can 1642caa51d26SJan Kara * insert it after fsync is done. 1643caa51d26SJan Kara */ 1644caa51d26SJan Kara if (sync) { 1645caa51d26SJan Kara if (WARN_ON_ONCE(!pfnp)) 1646caa51d26SJan Kara goto finish_iomap; 1647caa51d26SJan Kara *pfnp = pfn; 1648caa51d26SJan Kara result = VM_FAULT_NEEDDSYNC; 1649caa51d26SJan Kara goto finish_iomap; 1650caa51d26SJan Kara } 1651caa51d26SJan Kara 1652302a5e31SJan Kara trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); 1653302a5e31SJan Kara result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn, 1654302a5e31SJan Kara write); 1655642261acSRoss Zwisler break; 1656642261acSRoss Zwisler case IOMAP_UNWRITTEN: 1657642261acSRoss Zwisler case IOMAP_HOLE: 1658642261acSRoss Zwisler if (WARN_ON_ONCE(write)) 1659876f2946SRoss Zwisler break; 166091d25ba8SRoss Zwisler result = dax_pmd_load_hole(vmf, &iomap, entry); 1661642261acSRoss Zwisler break; 1662642261acSRoss Zwisler default: 1663642261acSRoss Zwisler WARN_ON_ONCE(1); 1664642261acSRoss Zwisler break; 1665642261acSRoss Zwisler } 1666642261acSRoss Zwisler 16679f141d6eSJan Kara finish_iomap: 16689f141d6eSJan Kara if (ops->iomap_end) { 16699f141d6eSJan Kara int copied = PMD_SIZE; 16709f141d6eSJan Kara 16719f141d6eSJan Kara if (result == VM_FAULT_FALLBACK) 16729f141d6eSJan Kara copied = 0; 16739f141d6eSJan Kara /* 16749f141d6eSJan Kara * The fault is done by now and there's no way back (other 16759f141d6eSJan Kara * thread may be already happily using PMD we have installed). 16769f141d6eSJan Kara * Just ignore error from ->iomap_end since we cannot do much 16779f141d6eSJan Kara * with it. 
16789f141d6eSJan Kara */ 16799f141d6eSJan Kara ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags, 16809f141d6eSJan Kara &iomap); 16819f141d6eSJan Kara } 1682876f2946SRoss Zwisler unlock_entry: 168391d25ba8SRoss Zwisler put_locked_mapping_entry(mapping, pgoff); 1684642261acSRoss Zwisler fallback: 1685642261acSRoss Zwisler if (result == VM_FAULT_FALLBACK) { 1686d8a849e1SDave Jiang split_huge_pmd(vma, vmf->pmd, vmf->address); 1687642261acSRoss Zwisler count_vm_event(THP_FAULT_FALLBACK); 1688642261acSRoss Zwisler } 1689282a8e03SRoss Zwisler out: 1690f4200391SDave Jiang trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result); 1691642261acSRoss Zwisler return result; 1692642261acSRoss Zwisler } 1693a2d58167SDave Jiang #else 1694ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 169501cddfe9SArnd Bergmann const struct iomap_ops *ops) 1696a2d58167SDave Jiang { 1697a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1698a2d58167SDave Jiang } 1699642261acSRoss Zwisler #endif /* CONFIG_FS_DAX_PMD */ 1700a2d58167SDave Jiang 1701a2d58167SDave Jiang /** 1702a2d58167SDave Jiang * dax_iomap_fault - handle a page fault on a DAX file 1703a2d58167SDave Jiang * @vmf: The description of the fault 1704cec04e8cSJan Kara * @pe_size: Size of the page to fault in 17059a0dd422SJan Kara * @pfnp: PFN to insert for synchronous faults if fsync is required 1706c0b24625SJan Kara * @iomap_errp: Storage for detailed error code in case of error 1707cec04e8cSJan Kara * @ops: Iomap ops passed from the file system 1708a2d58167SDave Jiang * 1709a2d58167SDave Jiang * When a page fault occurs, filesystems may call this helper in 1710a2d58167SDave Jiang * their fault handler for DAX files. dax_iomap_fault() assumes the caller 1711a2d58167SDave Jiang * has done all the necessary locking for the page fault to proceed 1712a2d58167SDave Jiang * successfully. 1713a2d58167SDave Jiang */ 1714ab77dab4SSouptick Joarder vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 1715c0b24625SJan Kara pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) 1716a2d58167SDave Jiang { 1717c791ace1SDave Jiang switch (pe_size) { 1718c791ace1SDave Jiang case PE_SIZE_PTE: 1719c0b24625SJan Kara return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); 1720c791ace1SDave Jiang case PE_SIZE_PMD: 17219a0dd422SJan Kara return dax_iomap_pmd_fault(vmf, pfnp, ops); 1722a2d58167SDave Jiang default: 1723a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1724a2d58167SDave Jiang } 1725a2d58167SDave Jiang } 1726a2d58167SDave Jiang EXPORT_SYMBOL_GPL(dax_iomap_fault); 172771eab6dfSJan Kara 172871eab6dfSJan Kara /** 172971eab6dfSJan Kara * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables 173071eab6dfSJan Kara * @vmf: The description of the fault 173171eab6dfSJan Kara * @pe_size: Size of entry to be inserted 173271eab6dfSJan Kara * @pfn: PFN to insert 173371eab6dfSJan Kara * 173471eab6dfSJan Kara * This function inserts a writeable PTE or PMD entry into the page tables for 173571eab6dfSJan Kara * an mmapped DAX file. It takes care of marking the corresponding radix tree 173671eab6dfSJan Kara * entry as dirty as well.
173771eab6dfSJan Kara */ 1738ab77dab4SSouptick Joarder static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf, 173971eab6dfSJan Kara enum page_entry_size pe_size, 174071eab6dfSJan Kara pfn_t pfn) 174171eab6dfSJan Kara { 174271eab6dfSJan Kara struct address_space *mapping = vmf->vma->vm_file->f_mapping; 174371eab6dfSJan Kara void *entry, **slot; 174471eab6dfSJan Kara pgoff_t index = vmf->pgoff; 1745ab77dab4SSouptick Joarder vm_fault_t ret; 174671eab6dfSJan Kara 1747b93b0163SMatthew Wilcox xa_lock_irq(&mapping->i_pages); 174871eab6dfSJan Kara entry = get_unlocked_mapping_entry(mapping, index, &slot); 174971eab6dfSJan Kara /* Did we race with someone splitting entry or so? */ 175071eab6dfSJan Kara if (!entry || 175171eab6dfSJan Kara (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) || 175271eab6dfSJan Kara (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) { 175371eab6dfSJan Kara put_unlocked_mapping_entry(mapping, index, entry); 1754b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 175571eab6dfSJan Kara trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, 175671eab6dfSJan Kara VM_FAULT_NOPAGE); 175771eab6dfSJan Kara return VM_FAULT_NOPAGE; 175871eab6dfSJan Kara } 1759b93b0163SMatthew Wilcox radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY); 176071eab6dfSJan Kara entry = lock_slot(mapping, slot); 1761b93b0163SMatthew Wilcox xa_unlock_irq(&mapping->i_pages); 176271eab6dfSJan Kara switch (pe_size) { 176371eab6dfSJan Kara case PE_SIZE_PTE: 1764ab77dab4SSouptick Joarder ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 176571eab6dfSJan Kara break; 176671eab6dfSJan Kara #ifdef CONFIG_FS_DAX_PMD 176771eab6dfSJan Kara case PE_SIZE_PMD: 1768ab77dab4SSouptick Joarder ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, 176971eab6dfSJan Kara pfn, true); 177071eab6dfSJan Kara break; 177171eab6dfSJan Kara #endif 177271eab6dfSJan Kara default: 1773ab77dab4SSouptick Joarder ret = VM_FAULT_FALLBACK; 177471eab6dfSJan Kara } 177571eab6dfSJan Kara put_locked_mapping_entry(mapping, index); 1776ab77dab4SSouptick Joarder trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); 1777ab77dab4SSouptick Joarder return ret; 177871eab6dfSJan Kara } 177971eab6dfSJan Kara 178071eab6dfSJan Kara /** 178171eab6dfSJan Kara * dax_finish_sync_fault - finish synchronous page fault 178271eab6dfSJan Kara * @vmf: The description of the fault 178371eab6dfSJan Kara * @pe_size: Size of entry to be inserted 178471eab6dfSJan Kara * @pfn: PFN to insert 178571eab6dfSJan Kara * 178671eab6dfSJan Kara * This function ensures that the file range touched by the page fault is 178771eab6dfSJan Kara * stored persistently on the media and handles insertion of the appropriate 178871eab6dfSJan Kara * page table entry.
178971eab6dfSJan Kara */ 1790ab77dab4SSouptick Joarder vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, 1791ab77dab4SSouptick Joarder enum page_entry_size pe_size, pfn_t pfn) 179271eab6dfSJan Kara { 179371eab6dfSJan Kara int err; 179471eab6dfSJan Kara loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; 179571eab6dfSJan Kara size_t len = 0; 179671eab6dfSJan Kara 179771eab6dfSJan Kara if (pe_size == PE_SIZE_PTE) 179871eab6dfSJan Kara len = PAGE_SIZE; 179971eab6dfSJan Kara else if (pe_size == PE_SIZE_PMD) 180071eab6dfSJan Kara len = PMD_SIZE; 180171eab6dfSJan Kara else 180271eab6dfSJan Kara WARN_ON_ONCE(1); 180371eab6dfSJan Kara err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); 180471eab6dfSJan Kara if (err) 180571eab6dfSJan Kara return VM_FAULT_SIGBUS; 180671eab6dfSJan Kara return dax_insert_pfn_mkwrite(vmf, pe_size, pfn); 180771eab6dfSJan Kara } 180871eab6dfSJan Kara EXPORT_SYMBOL_GPL(dax_finish_sync_fault); 1809
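/*
 * Illustrative sketch only (not part of fs/dax.c): one way a filesystem might
 * drive dax_iomap_rw() from its ->read_iter/->write_iter methods. The names
 * examplefs_dax_read_iter(), examplefs_dax_write_iter() and examplefs_iomap_ops
 * are hypothetical; the locking simply follows the documented expectation that
 * the caller provides read/write exclusion (shared i_rwsem for reads,
 * exclusive i_rwsem for writes).
 */
static ssize_t examplefs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0; /* nothing to read, skip the atime update */

	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &examplefs_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

static ssize_t examplefs_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = dax_iomap_rw(iocb, from, &examplefs_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}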
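/*
 * Illustrative sketch only (not part of fs/dax.c): a hypothetical ->huge_fault
 * handler combining dax_iomap_fault() with dax_finish_sync_fault() for
 * MAP_SYNC mappings. examplefs_dax_huge_fault(), examplefs_iomap_ops and the
 * EXAMPLEFS_I(inode)->i_mmap_sem lock (serializing faults against truncate /
 * punch hole) are assumptions; the VM_FAULT_NEEDDSYNC handshake follows the
 * helpers above.
 */
static vm_fault_t examplefs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	vm_fault_t result;
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	/* Serialize the fault against truncate, as required by dax_iomap_fault(). */
	down_read(&EXAMPLEFS_I(inode)->i_mmap_sem);
	result = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &examplefs_iomap_ops);
	up_read(&EXAMPLEFS_I(inode)->i_mmap_sem);

	if (write) {
		/*
		 * Synchronous fault: dirty metadata must be durable before the
		 * writeable PTE/PMD is finally installed.
		 */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		sb_end_pagefault(inode->i_sb);
	}

	return result;
}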