// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <linux/rmap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset.  */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
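 *
 * For example (illustrative only): a locked PTE-sized entry for pfn 0x1234
 * is stored as the XArray value (0x1234 << DAX_SHIFT) | DAX_LOCKED, and
 * dax_to_pfn() below recovers the pfn by shifting the value back down by
 * DAX_SHIFT.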
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.
 * The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
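	 * (Non-exclusive waiters are woken on every wake up of the
	 * waitqueue regardless of the nr_exclusive argument, so this
	 * waiter cannot be left stranded behind an exclusive wake chain.)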
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
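 * For a PMD-sized entry this walks PMD_SIZE / PAGE_SIZE consecutive pfns;
 * for a normal PTE entry it visits exactly one pfn (see dax_entry_size()).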
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

static inline bool dax_page_is_shared(struct page *page)
{
	return page->mapping == PAGE_MAPPING_DAX_SHARED;
}

/*
 * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the
 * refcount.
 */
static inline void dax_page_share_get(struct page *page)
{
	if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
		/*
		 * Reset the index if the page was already mapped
		 * regularly before.
		 */
		if (page->mapping)
			page->share = 1;
		page->mapping = PAGE_MAPPING_DAX_SHARED;
	}
	page->share++;
}

static inline unsigned long dax_page_share_put(struct page *page)
{
	return --page->share;
}

/*
 * When it is called from dax_insert_entry(), the shared flag indicates
 * whether this entry is shared by multiple files.  If so, set page->mapping
 * to PAGE_MAPPING_DAX_SHARED, and use page->share as the refcount.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address, bool shared)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (shared) {
			dax_page_share_get(page);
		} else {
			WARN_ON_ONCE(page->mapping);
			page->mapping = mapping;
			page->index = index + i++;
		}
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		if (dax_page_is_shared(page)) {
			/* keep the shared flag if this page is still shared */
			if (dax_page_share_put(page) > 0)
				continue;
		} else
			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
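 * The cookie is the locked entry value itself, or ~0UL for the device-dax
 * case where no locking is required.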
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
 * @mapping: the file's mapping whose entry we want to lock
 * @index: the offset within this file
 * @page: output the dax page corresponding to this dax entry
 *
 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
 * could not be locked.
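 * A cookie of ~0UL means no entry was locked (not present, or a zero/empty
 * entry); dax_unlock_mapping_entry() treats such a cookie as a no-op.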
 */
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
		struct page **page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	rcu_read_lock();
	for (;;) {
		entry = NULL;
		if (!dax_mapping(mapping))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		xas_set(&xas, index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		if (!entry ||
		    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
			/*
			 * Because we are looking up the entry by the file's
			 * mapping and index, it may not have been inserted
			 * yet, or it may be a zero/empty entry.  We don't
			 * treat this as an error.  Return a special value
			 * and do not output @page.
			 */
			entry = (void *)~0UL;
		} else {
			*page = pfn_to_page(dax_to_pfn(entry));
			dax_lock_entry(&xas, entry);
		}
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
		dax_entry_t cookie)
{
	XA_STATE(xas, &mapping->i_pages, index);

	if (cookie == ~0UL)
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.
 * A PMD insertion will fail if it finds any PTE entries already in the tree,
 * and a PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	pmd_downgrade = false;
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
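		 * For a huge zero page we must unmap the PMD range first so
		 * that no mapping of the huge zero page survives the split
		 * into PTE entries.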
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}

/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
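 *
 * Return: the first page found with an elevated reference count, or NULL
 * if no such page exists in the range.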
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start till end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}

static int __dax_clear_dirty_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	unsigned int scanned = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end) {
		entry = get_unlocked_entry(&xas, 0);
		xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
		xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);

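		/* Every XA_CHECK_SCHED entries, drop the lock and reschedule */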
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);

	return 0;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
}

static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
	void *vto, *kaddr;
	long rc;
	int id;

	id = dax_read_lock();
	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
			       &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(vmf->cow_page);
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
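 * In other words, the fault is treated as synchronous only for a write
 * fault on a VM_SYNC vma over an iomap that still carries dirty metadata
 * (IOMAP_F_DIRTY).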
 */
static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
		struct vm_area_struct *vma)
{
	return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
		(iter->iomap.flags & IOMAP_F_DIRTY);
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void *entry, pfn_t pfn,
		unsigned long flags)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *new_entry = dax_make_entry(pfn, flags);
	bool write = iter->flags & IOMAP_WRITE;
	bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
	bool shared = iter->iomap.flags & IOMAP_F_SHARED;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
				shared);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	if (write && shared)
		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);

	xas_unlock_irq(xas);
	return entry;
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count, end;
	long ret = 0;
	struct vm_area_struct *vma;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
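	 * The entry is unlocked again only after the flush and the dirty
	 * tag clearing below have completed.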
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);
	end = index + count - 1;

	/* Walk all mappings of a given index of a file and writeprotect them */
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
		pfn_mkclean_range(pfn, count, index, vma);
		cond_resched();
	}
	i_mmap_unlock_read(mapping);

	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
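 * This is typically reached from a filesystem's ->writepages() path for a
 * DAX inode, e.g. as part of fsync()/msync().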
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
		size_t size, void **kaddr, pfn_t *pfnp)
{
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	int id, rc = 0;
	long length;

	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   DAX_ACCESS, kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	if (!pfnp)
		goto out_check_addr;
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;

out_check_addr:
	if (!kaddr)
		goto out;
	if (!*kaddr)
		rc = -EFAULT;
out:
	dax_read_unlock(id);
	return rc;
}

/**
 * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
 * by copying the data before and after the range to be written.
 * @pos:	address to do copy from.
 * @length:	size of copy operation.
 * @align_size:	aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE)
 * @srcmap:	iomap srcmap
 * @daddr:	destination address to copy to.
 *
 * This can be called from two places. Either during DAX write fault (page
 * aligned), to copy the length size data to daddr. Or, while doing normal DAX
 * write operation, dax_iomap_iter() might call this to do the copy of either
 * start or end unaligned address. In the latter case the rest of the copy of
 * aligned ranges is taken care of by dax_iomap_iter() itself.
 * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the
 * area to make sure no old data remains.
 */
static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
		const struct iomap *srcmap, void *daddr)
{
	loff_t head_off = pos & (align_size - 1);
	size_t size = ALIGN(head_off + length, align_size);
	loff_t end = pos + length;
	loff_t pg_end = round_up(end, align_size);
	/* copy_all is usually in page fault case */
	bool copy_all = head_off == 0 && end == pg_end;
	/* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
	bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
			 srcmap->type == IOMAP_UNWRITTEN;
	void *saddr = 0;
	int ret = 0;

	if (!zero_edge) {
		ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
		if (ret)
			return dax_mem2blk_err(ret);
	}

	if (copy_all) {
		if (zero_edge)
			memset(daddr, 0, size);
		else
			ret = copy_mc_to_kernel(daddr, saddr, length);
		goto out;
	}

	/* Copy the head part of the range */
	if (head_off) {
		if (zero_edge)
			memset(daddr, 0, head_off);
		else {
			ret = copy_mc_to_kernel(daddr, saddr, head_off);
			if (ret)
				return -EIO;
		}
	}

	/* Copy the tail part of the range */
range */ 1160ff17b8dfSShiyang Ruan if (end < pg_end) { 1161ff17b8dfSShiyang Ruan loff_t tail_off = head_off + length; 1162ff17b8dfSShiyang Ruan loff_t tail_len = pg_end - end; 1163ff17b8dfSShiyang Ruan 1164708dfad2SShiyang Ruan if (zero_edge) 1165708dfad2SShiyang Ruan memset(daddr + tail_off, 0, tail_len); 1166708dfad2SShiyang Ruan else { 1167708dfad2SShiyang Ruan ret = copy_mc_to_kernel(daddr + tail_off, 1168708dfad2SShiyang Ruan saddr + tail_off, tail_len); 1169ff17b8dfSShiyang Ruan if (ret) 1170ff17b8dfSShiyang Ruan return -EIO; 1171ff17b8dfSShiyang Ruan } 1172708dfad2SShiyang Ruan } 1173708dfad2SShiyang Ruan out: 1174708dfad2SShiyang Ruan if (zero_edge) 1175708dfad2SShiyang Ruan dax_flush(srcmap->dax_dev, daddr, size); 1176708dfad2SShiyang Ruan return ret ? -EIO : 0; 1177ff17b8dfSShiyang Ruan } 1178ff17b8dfSShiyang Ruan 11792f89dc12SJan Kara /* 118091d25ba8SRoss Zwisler * The user has performed a load from a hole in the file. Allocating a new 118191d25ba8SRoss Zwisler * page in the file would cause excessive storage usage for workloads with 118291d25ba8SRoss Zwisler * sparse files. Instead we insert a read-only mapping of the 4k zero page. 118391d25ba8SRoss Zwisler * If this page is ever written to we will re-fault and change the mapping to 118491d25ba8SRoss Zwisler * point to real DAX storage instead. 11852f89dc12SJan Kara */ 1186e5d6df73SShiyang Ruan static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, 1187e5d6df73SShiyang Ruan const struct iomap_iter *iter, void **entry) 1188e30331ffSRoss Zwisler { 1189e5d6df73SShiyang Ruan struct inode *inode = iter->inode; 119091d25ba8SRoss Zwisler unsigned long vaddr = vmf->address; 1191b90ca5ccSMatthew Wilcox pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); 1192b90ca5ccSMatthew Wilcox vm_fault_t ret; 1193e30331ffSRoss Zwisler 1194e5d6df73SShiyang Ruan *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); 11953159f943SMatthew Wilcox 1196ab77dab4SSouptick Joarder ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); 1197e30331ffSRoss Zwisler trace_dax_load_hole(inode, vmf, ret); 1198e30331ffSRoss Zwisler return ret; 1199e30331ffSRoss Zwisler } 1200e30331ffSRoss Zwisler 1201c2436190SShiyang Ruan #ifdef CONFIG_FS_DAX_PMD 1202c2436190SShiyang Ruan static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, 1203e5d6df73SShiyang Ruan const struct iomap_iter *iter, void **entry) 1204c2436190SShiyang Ruan { 1205c2436190SShiyang Ruan struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1206c2436190SShiyang Ruan unsigned long pmd_addr = vmf->address & PMD_MASK; 1207c2436190SShiyang Ruan struct vm_area_struct *vma = vmf->vma; 1208c2436190SShiyang Ruan struct inode *inode = mapping->host; 1209c2436190SShiyang Ruan pgtable_t pgtable = NULL; 1210c2436190SShiyang Ruan struct page *zero_page; 1211c2436190SShiyang Ruan spinlock_t *ptl; 1212c2436190SShiyang Ruan pmd_t pmd_entry; 1213c2436190SShiyang Ruan pfn_t pfn; 1214c2436190SShiyang Ruan 1215c2436190SShiyang Ruan zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); 1216c2436190SShiyang Ruan 1217c2436190SShiyang Ruan if (unlikely(!zero_page)) 1218c2436190SShiyang Ruan goto fallback; 1219c2436190SShiyang Ruan 1220c2436190SShiyang Ruan pfn = page_to_pfn_t(zero_page); 1221e5d6df73SShiyang Ruan *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, 1222e5d6df73SShiyang Ruan DAX_PMD | DAX_ZERO_PAGE); 1223c2436190SShiyang Ruan 1224c2436190SShiyang Ruan if (arch_needs_pgtable_deposit()) { 1225c2436190SShiyang Ruan pgtable = pte_alloc_one(vma->vm_mm); 
1226c2436190SShiyang Ruan if (!pgtable) 1227c2436190SShiyang Ruan return VM_FAULT_OOM; 1228c2436190SShiyang Ruan } 1229c2436190SShiyang Ruan 1230c2436190SShiyang Ruan ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1231c2436190SShiyang Ruan if (!pmd_none(*(vmf->pmd))) { 1232c2436190SShiyang Ruan spin_unlock(ptl); 1233c2436190SShiyang Ruan goto fallback; 1234c2436190SShiyang Ruan } 1235c2436190SShiyang Ruan 1236c2436190SShiyang Ruan if (pgtable) { 1237c2436190SShiyang Ruan pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); 1238c2436190SShiyang Ruan mm_inc_nr_ptes(vma->vm_mm); 1239c2436190SShiyang Ruan } 1240c2436190SShiyang Ruan pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); 1241c2436190SShiyang Ruan pmd_entry = pmd_mkhuge(pmd_entry); 1242c2436190SShiyang Ruan set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); 1243c2436190SShiyang Ruan spin_unlock(ptl); 1244c2436190SShiyang Ruan trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); 1245c2436190SShiyang Ruan return VM_FAULT_NOPAGE; 1246c2436190SShiyang Ruan 1247c2436190SShiyang Ruan fallback: 1248c2436190SShiyang Ruan if (pgtable) 1249c2436190SShiyang Ruan pte_free(vma->vm_mm, pgtable); 1250c2436190SShiyang Ruan trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); 1251c2436190SShiyang Ruan return VM_FAULT_FALLBACK; 1252c2436190SShiyang Ruan } 1253c2436190SShiyang Ruan #else 1254c2436190SShiyang Ruan static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, 1255e5d6df73SShiyang Ruan const struct iomap_iter *iter, void **entry) 1256c2436190SShiyang Ruan { 1257c2436190SShiyang Ruan return VM_FAULT_FALLBACK; 1258c2436190SShiyang Ruan } 1259c2436190SShiyang Ruan #endif /* CONFIG_FS_DAX_PMD */ 1260c2436190SShiyang Ruan 1261d984648eSShiyang Ruan static s64 dax_unshare_iter(struct iomap_iter *iter) 1262d984648eSShiyang Ruan { 1263d984648eSShiyang Ruan struct iomap *iomap = &iter->iomap; 1264d984648eSShiyang Ruan const struct iomap *srcmap = iomap_iter_srcmap(iter); 1265d984648eSShiyang Ruan loff_t pos = iter->pos; 1266d984648eSShiyang Ruan loff_t length = iomap_length(iter); 1267d984648eSShiyang Ruan int id = 0; 1268d984648eSShiyang Ruan s64 ret = 0; 1269d984648eSShiyang Ruan void *daddr = NULL, *saddr = NULL; 1270d984648eSShiyang Ruan 1271d984648eSShiyang Ruan /* don't bother with blocks that are not shared to start with */ 1272d984648eSShiyang Ruan if (!(iomap->flags & IOMAP_F_SHARED)) 1273d984648eSShiyang Ruan return length; 1274d984648eSShiyang Ruan 1275d984648eSShiyang Ruan id = dax_read_lock(); 1276d984648eSShiyang Ruan ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL); 1277d984648eSShiyang Ruan if (ret < 0) 1278d984648eSShiyang Ruan goto out_unlock; 1279d984648eSShiyang Ruan 128013dd4e04SShiyang Ruan /* zero the distance if srcmap is HOLE or UNWRITTEN */ 128113dd4e04SShiyang Ruan if (srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN) { 128213dd4e04SShiyang Ruan memset(daddr, 0, length); 128313dd4e04SShiyang Ruan dax_flush(iomap->dax_dev, daddr, length); 128413dd4e04SShiyang Ruan ret = length; 128513dd4e04SShiyang Ruan goto out_unlock; 128613dd4e04SShiyang Ruan } 128713dd4e04SShiyang Ruan 1288d984648eSShiyang Ruan ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL); 1289d984648eSShiyang Ruan if (ret < 0) 1290d984648eSShiyang Ruan goto out_unlock; 1291d984648eSShiyang Ruan 1292388bc034SShiyang Ruan if (copy_mc_to_kernel(daddr, saddr, length) == 0) 1293388bc034SShiyang Ruan ret = length; 1294388bc034SShiyang Ruan else 1295d984648eSShiyang Ruan ret 
= -EIO; 1296d984648eSShiyang Ruan 1297d984648eSShiyang Ruan out_unlock: 1298d984648eSShiyang Ruan dax_read_unlock(id); 12991ea7ca1bSJane Chu return dax_mem2blk_err(ret); 1300d984648eSShiyang Ruan } 1301d984648eSShiyang Ruan 1302d984648eSShiyang Ruan int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len, 1303d984648eSShiyang Ruan const struct iomap_ops *ops) 1304d984648eSShiyang Ruan { 1305d984648eSShiyang Ruan struct iomap_iter iter = { 1306d984648eSShiyang Ruan .inode = inode, 1307d984648eSShiyang Ruan .pos = pos, 1308d984648eSShiyang Ruan .len = len, 1309d984648eSShiyang Ruan .flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX, 1310d984648eSShiyang Ruan }; 1311d984648eSShiyang Ruan int ret; 1312d984648eSShiyang Ruan 1313d984648eSShiyang Ruan while ((ret = iomap_iter(&iter, ops)) > 0) 1314d984648eSShiyang Ruan iter.processed = dax_unshare_iter(&iter); 1315d984648eSShiyang Ruan return ret; 1316d984648eSShiyang Ruan } 1317d984648eSShiyang Ruan EXPORT_SYMBOL_GPL(dax_file_unshare); 1318d984648eSShiyang Ruan 13198dbfc76dSShiyang Ruan static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size) 1320e5c71954SChristoph Hellwig { 13218dbfc76dSShiyang Ruan const struct iomap *iomap = &iter->iomap; 13228dbfc76dSShiyang Ruan const struct iomap *srcmap = iomap_iter_srcmap(iter); 13238dbfc76dSShiyang Ruan unsigned offset = offset_in_page(pos); 13248dbfc76dSShiyang Ruan pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1325e5c71954SChristoph Hellwig void *kaddr; 1326e5c71954SChristoph Hellwig long ret; 1327e5c71954SChristoph Hellwig 13288dbfc76dSShiyang Ruan ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, 13298dbfc76dSShiyang Ruan NULL); 13308dbfc76dSShiyang Ruan if (ret < 0) 13311ea7ca1bSJane Chu return dax_mem2blk_err(ret); 13321ea7ca1bSJane Chu 1333e5c71954SChristoph Hellwig memset(kaddr + offset, 0, size); 1334708dfad2SShiyang Ruan if (iomap->flags & IOMAP_F_SHARED) 1335708dfad2SShiyang Ruan ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap, 13368dbfc76dSShiyang Ruan kaddr); 1337708dfad2SShiyang Ruan else 13388dbfc76dSShiyang Ruan dax_flush(iomap->dax_dev, kaddr + offset, size); 1339e5c71954SChristoph Hellwig return ret; 1340e5c71954SChristoph Hellwig } 1341e5c71954SChristoph Hellwig 1342c6f40468SChristoph Hellwig static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero) 1343679c8bd3SChristoph Hellwig { 1344c6f40468SChristoph Hellwig const struct iomap *iomap = &iter->iomap; 1345c6f40468SChristoph Hellwig const struct iomap *srcmap = iomap_iter_srcmap(iter); 1346c6f40468SChristoph Hellwig loff_t pos = iter->pos; 1347c6f40468SChristoph Hellwig u64 length = iomap_length(iter); 1348c6f40468SChristoph Hellwig s64 written = 0; 1349c6f40468SChristoph Hellwig 1350c6f40468SChristoph Hellwig /* already zeroed? we're done. */ 1351c6f40468SChristoph Hellwig if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) 1352c6f40468SChristoph Hellwig return length; 1353c6f40468SChristoph Hellwig 1354f80e1668SShiyang Ruan /* 1355f80e1668SShiyang Ruan * invalidate the pages whose sharing state is to be changed 1356f80e1668SShiyang Ruan * because of CoW. 
1357f80e1668SShiyang Ruan */ 1358f80e1668SShiyang Ruan if (iomap->flags & IOMAP_F_SHARED) 1359f80e1668SShiyang Ruan invalidate_inode_pages2_range(iter->inode->i_mapping, 1360f80e1668SShiyang Ruan pos >> PAGE_SHIFT, 1361f80e1668SShiyang Ruan (pos + length - 1) >> PAGE_SHIFT); 1362f80e1668SShiyang Ruan 1363c6f40468SChristoph Hellwig do { 136481ee8e52SMatthew Wilcox (Oracle) unsigned offset = offset_in_page(pos); 136581ee8e52SMatthew Wilcox (Oracle) unsigned size = min_t(u64, PAGE_SIZE - offset, length); 1366c6f40468SChristoph Hellwig pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1367c6f40468SChristoph Hellwig long rc; 1368c6f40468SChristoph Hellwig int id; 13690a23f9ffSVivek Goyal 1370cccbce67SDan Williams id = dax_read_lock(); 1371e5c71954SChristoph Hellwig if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE) 137281ee8e52SMatthew Wilcox (Oracle) rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1); 13730a23f9ffSVivek Goyal else 13748dbfc76dSShiyang Ruan rc = dax_memzero(iter, pos, size); 1375cccbce67SDan Williams dax_read_unlock(id); 13760a23f9ffSVivek Goyal 1377e5c71954SChristoph Hellwig if (rc < 0) 1378e5c71954SChristoph Hellwig return rc; 1379c6f40468SChristoph Hellwig pos += size; 1380c6f40468SChristoph Hellwig length -= size; 1381c6f40468SChristoph Hellwig written += size; 1382c6f40468SChristoph Hellwig } while (length > 0); 1383c6f40468SChristoph Hellwig 1384f8189d5dSKaixu Xia if (did_zero) 1385f8189d5dSKaixu Xia *did_zero = true; 1386c6f40468SChristoph Hellwig return written; 1387679c8bd3SChristoph Hellwig } 1388679c8bd3SChristoph Hellwig 1389c6f40468SChristoph Hellwig int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, 1390c6f40468SChristoph Hellwig const struct iomap_ops *ops) 1391c6f40468SChristoph Hellwig { 1392c6f40468SChristoph Hellwig struct iomap_iter iter = { 1393c6f40468SChristoph Hellwig .inode = inode, 1394c6f40468SChristoph Hellwig .pos = pos, 1395c6f40468SChristoph Hellwig .len = len, 1396952da063SChristoph Hellwig .flags = IOMAP_DAX | IOMAP_ZERO, 1397c6f40468SChristoph Hellwig }; 1398c6f40468SChristoph Hellwig int ret; 1399c6f40468SChristoph Hellwig 1400c6f40468SChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0) 1401c6f40468SChristoph Hellwig iter.processed = dax_zero_iter(&iter, did_zero); 1402c6f40468SChristoph Hellwig return ret; 1403c6f40468SChristoph Hellwig } 1404c6f40468SChristoph Hellwig EXPORT_SYMBOL_GPL(dax_zero_range); 1405c6f40468SChristoph Hellwig 1406c6f40468SChristoph Hellwig int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 1407c6f40468SChristoph Hellwig const struct iomap_ops *ops) 1408c6f40468SChristoph Hellwig { 1409c6f40468SChristoph Hellwig unsigned int blocksize = i_blocksize(inode); 1410c6f40468SChristoph Hellwig unsigned int off = pos & (blocksize - 1); 1411c6f40468SChristoph Hellwig 1412c6f40468SChristoph Hellwig /* Block boundary? 
Nothing to do */ 1413c6f40468SChristoph Hellwig if (!off) 1414c6f40468SChristoph Hellwig return 0; 1415c6f40468SChristoph Hellwig return dax_zero_range(inode, pos, blocksize - off, did_zero, ops); 1416c6f40468SChristoph Hellwig } 1417c6f40468SChristoph Hellwig EXPORT_SYMBOL_GPL(dax_truncate_page); 1418c6f40468SChristoph Hellwig 1419ca289e0bSChristoph Hellwig static loff_t dax_iomap_iter(const struct iomap_iter *iomi, 1420ca289e0bSChristoph Hellwig struct iov_iter *iter) 1421a254e568SChristoph Hellwig { 1422ca289e0bSChristoph Hellwig const struct iomap *iomap = &iomi->iomap; 1423f80e1668SShiyang Ruan const struct iomap *srcmap = iomap_iter_srcmap(iomi); 1424ca289e0bSChristoph Hellwig loff_t length = iomap_length(iomi); 1425ca289e0bSChristoph Hellwig loff_t pos = iomi->pos; 1426cccbce67SDan Williams struct dax_device *dax_dev = iomap->dax_dev; 1427a254e568SChristoph Hellwig loff_t end = pos + length, done = 0; 1428ff17b8dfSShiyang Ruan bool write = iov_iter_rw(iter) == WRITE; 1429f80e1668SShiyang Ruan bool cow = write && iomap->flags & IOMAP_F_SHARED; 1430a254e568SChristoph Hellwig ssize_t ret = 0; 1431a77d4786SDan Williams size_t xfer; 1432cccbce67SDan Williams int id; 1433a254e568SChristoph Hellwig 1434ff17b8dfSShiyang Ruan if (!write) { 1435ca289e0bSChristoph Hellwig end = min(end, i_size_read(iomi->inode)); 1436a254e568SChristoph Hellwig if (pos >= end) 1437a254e568SChristoph Hellwig return 0; 1438a254e568SChristoph Hellwig 1439a254e568SChristoph Hellwig if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) 1440a254e568SChristoph Hellwig return iov_iter_zero(min(length, end - pos), iter); 1441a254e568SChristoph Hellwig } 1442a254e568SChristoph Hellwig 1443ff17b8dfSShiyang Ruan /* 1444ff17b8dfSShiyang Ruan * In DAX mode, enforce either pure overwrites of written extents, or 1445ff17b8dfSShiyang Ruan * writes to unwritten extents as part of a copy-on-write operation. 1446ff17b8dfSShiyang Ruan */ 1447ff17b8dfSShiyang Ruan if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED && 1448ff17b8dfSShiyang Ruan !(iomap->flags & IOMAP_F_SHARED))) 1449a254e568SChristoph Hellwig return -EIO; 1450a254e568SChristoph Hellwig 1451e3fce68cSJan Kara /* 1452e3fce68cSJan Kara * Write can allocate block for an area which has a hole page mapped 1453e3fce68cSJan Kara * into page tables. We have to tear down these mappings so that data 1454e3fce68cSJan Kara * written by write(2) is visible in mmap. 1455e3fce68cSJan Kara */ 1456f80e1668SShiyang Ruan if (iomap->flags & IOMAP_F_NEW || cow) { 1457f76b3a32SShiyang Ruan /* 1458f76b3a32SShiyang Ruan * Filesystem allows CoW on non-shared extents. The src extents 1459f76b3a32SShiyang Ruan * may have been mmapped with dirty mark before. To be able to 1460f76b3a32SShiyang Ruan * invalidate its dax entries, we need to clear the dirty mark 1461f76b3a32SShiyang Ruan * in advance. 
1462f76b3a32SShiyang Ruan */ 1463f76b3a32SShiyang Ruan if (cow) 1464f76b3a32SShiyang Ruan __dax_clear_dirty_range(iomi->inode->i_mapping, 1465f76b3a32SShiyang Ruan pos >> PAGE_SHIFT, 1466f76b3a32SShiyang Ruan (end - 1) >> PAGE_SHIFT); 1467ca289e0bSChristoph Hellwig invalidate_inode_pages2_range(iomi->inode->i_mapping, 1468e3fce68cSJan Kara pos >> PAGE_SHIFT, 1469e3fce68cSJan Kara (end - 1) >> PAGE_SHIFT); 1470e3fce68cSJan Kara } 1471e3fce68cSJan Kara 1472cccbce67SDan Williams id = dax_read_lock(); 1473a254e568SChristoph Hellwig while (pos < end) { 1474a254e568SChristoph Hellwig unsigned offset = pos & (PAGE_SIZE - 1); 1475cccbce67SDan Williams const size_t size = ALIGN(length + offset, PAGE_SIZE); 147660696eb2SChristoph Hellwig pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1477a254e568SChristoph Hellwig ssize_t map_len; 1478047218ecSJane Chu bool recovery = false; 1479cccbce67SDan Williams void *kaddr; 1480a254e568SChristoph Hellwig 1481d1908f52SMichal Hocko if (fatal_signal_pending(current)) { 1482d1908f52SMichal Hocko ret = -EINTR; 1483d1908f52SMichal Hocko break; 1484d1908f52SMichal Hocko } 1485d1908f52SMichal Hocko 1486cccbce67SDan Williams map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), 1487e511c4a3SJane Chu DAX_ACCESS, &kaddr, NULL); 14881ea7ca1bSJane Chu if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) { 1489047218ecSJane Chu map_len = dax_direct_access(dax_dev, pgoff, 1490047218ecSJane Chu PHYS_PFN(size), DAX_RECOVERY_WRITE, 149186ed913bSHuaisheng Ye &kaddr, NULL); 1492047218ecSJane Chu if (map_len > 0) 1493047218ecSJane Chu recovery = true; 1494047218ecSJane Chu } 1495a254e568SChristoph Hellwig if (map_len < 0) { 14961ea7ca1bSJane Chu ret = dax_mem2blk_err(map_len); 1497a254e568SChristoph Hellwig break; 1498a254e568SChristoph Hellwig } 1499a254e568SChristoph Hellwig 1500f80e1668SShiyang Ruan if (cow) { 1501708dfad2SShiyang Ruan ret = dax_iomap_copy_around(pos, length, PAGE_SIZE, 1502708dfad2SShiyang Ruan srcmap, kaddr); 1503ff17b8dfSShiyang Ruan if (ret) 1504ff17b8dfSShiyang Ruan break; 1505ff17b8dfSShiyang Ruan } 1506ff17b8dfSShiyang Ruan 1507cccbce67SDan Williams map_len = PFN_PHYS(map_len); 1508cccbce67SDan Williams kaddr += offset; 1509a254e568SChristoph Hellwig map_len -= offset; 1510a254e568SChristoph Hellwig if (map_len > end - pos) 1511a254e568SChristoph Hellwig map_len = end - pos; 1512a254e568SChristoph Hellwig 1513047218ecSJane Chu if (recovery) 1514047218ecSJane Chu xfer = dax_recovery_write(dax_dev, pgoff, kaddr, 1515047218ecSJane Chu map_len, iter); 1516ff17b8dfSShiyang Ruan else if (write) 1517a77d4786SDan Williams xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, 1518fec53774SDan Williams map_len, iter); 1519a254e568SChristoph Hellwig else 1520a77d4786SDan Williams xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr, 1521b3a9a0c3SDan Williams map_len, iter); 1522a254e568SChristoph Hellwig 1523a77d4786SDan Williams pos += xfer; 1524a77d4786SDan Williams length -= xfer; 1525a77d4786SDan Williams done += xfer; 1526a77d4786SDan Williams 1527a77d4786SDan Williams if (xfer == 0) 1528a77d4786SDan Williams ret = -EFAULT; 1529a77d4786SDan Williams if (xfer < map_len) 1530a77d4786SDan Williams break; 1531a254e568SChristoph Hellwig } 1532cccbce67SDan Williams dax_read_unlock(id); 1533a254e568SChristoph Hellwig 1534a254e568SChristoph Hellwig return done ? 
done : ret; 1535a254e568SChristoph Hellwig } 1536a254e568SChristoph Hellwig 1537a254e568SChristoph Hellwig /** 153811c59c92SRoss Zwisler * dax_iomap_rw - Perform I/O to a DAX file 1539a254e568SChristoph Hellwig * @iocb: The control block for this I/O 1540a254e568SChristoph Hellwig * @iter: The addresses to do I/O from or to 1541a254e568SChristoph Hellwig * @ops: iomap ops passed from the file system 1542a254e568SChristoph Hellwig * 1543a254e568SChristoph Hellwig * This function performs read and write operations to directly mapped 1544a254e568SChristoph Hellwig * persistent memory. The callers needs to take care of read/write exclusion 1545a254e568SChristoph Hellwig * and evicting any page cache pages in the region under I/O. 1546a254e568SChristoph Hellwig */ 1547a254e568SChristoph Hellwig ssize_t 154811c59c92SRoss Zwisler dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, 15498ff6daa1SChristoph Hellwig const struct iomap_ops *ops) 1550a254e568SChristoph Hellwig { 1551ca289e0bSChristoph Hellwig struct iomap_iter iomi = { 1552ca289e0bSChristoph Hellwig .inode = iocb->ki_filp->f_mapping->host, 1553ca289e0bSChristoph Hellwig .pos = iocb->ki_pos, 1554ca289e0bSChristoph Hellwig .len = iov_iter_count(iter), 1555952da063SChristoph Hellwig .flags = IOMAP_DAX, 1556ca289e0bSChristoph Hellwig }; 1557ca289e0bSChristoph Hellwig loff_t done = 0; 1558ca289e0bSChristoph Hellwig int ret; 1559a254e568SChristoph Hellwig 156017d9c15cSLi Jinlin if (!iomi.len) 156117d9c15cSLi Jinlin return 0; 156217d9c15cSLi Jinlin 1563168316dbSChristoph Hellwig if (iov_iter_rw(iter) == WRITE) { 1564ca289e0bSChristoph Hellwig lockdep_assert_held_write(&iomi.inode->i_rwsem); 1565ca289e0bSChristoph Hellwig iomi.flags |= IOMAP_WRITE; 1566168316dbSChristoph Hellwig } else { 1567ca289e0bSChristoph Hellwig lockdep_assert_held(&iomi.inode->i_rwsem); 1568168316dbSChristoph Hellwig } 1569a254e568SChristoph Hellwig 157096222d53SJeff Moyer if (iocb->ki_flags & IOCB_NOWAIT) 1571ca289e0bSChristoph Hellwig iomi.flags |= IOMAP_NOWAIT; 157296222d53SJeff Moyer 1573ca289e0bSChristoph Hellwig while ((ret = iomap_iter(&iomi, ops)) > 0) 1574ca289e0bSChristoph Hellwig iomi.processed = dax_iomap_iter(&iomi, iter); 1575a254e568SChristoph Hellwig 1576ca289e0bSChristoph Hellwig done = iomi.pos - iocb->ki_pos; 1577ca289e0bSChristoph Hellwig iocb->ki_pos = iomi.pos; 1578a254e568SChristoph Hellwig return done ? done : ret; 1579a254e568SChristoph Hellwig } 158011c59c92SRoss Zwisler EXPORT_SYMBOL_GPL(dax_iomap_rw); 1581a7d73fe6SChristoph Hellwig 1582ab77dab4SSouptick Joarder static vm_fault_t dax_fault_return(int error) 15839f141d6eSJan Kara { 15849f141d6eSJan Kara if (error == 0) 15859f141d6eSJan Kara return VM_FAULT_NOPAGE; 1586c9aed74eSSouptick Joarder return vmf_error(error); 15879f141d6eSJan Kara } 15889f141d6eSJan Kara 1589aaa422c4SDan Williams /* 159055f81639SShiyang Ruan * When handling a synchronous page fault and the inode need a fsync, we can 159155f81639SShiyang Ruan * insert the PTE/PMD into page tables only after that fsync happened. Skip 159255f81639SShiyang Ruan * insertion for now and return the pfn so that caller can insert it after the 159355f81639SShiyang Ruan * fsync is done. 
159455f81639SShiyang Ruan */ 159555f81639SShiyang Ruan static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn) 159655f81639SShiyang Ruan { 159755f81639SShiyang Ruan if (WARN_ON_ONCE(!pfnp)) 159855f81639SShiyang Ruan return VM_FAULT_SIGBUS; 159955f81639SShiyang Ruan *pfnp = pfn; 160055f81639SShiyang Ruan return VM_FAULT_NEEDDSYNC; 160155f81639SShiyang Ruan } 160255f81639SShiyang Ruan 160365dd814aSChristoph Hellwig static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, 160465dd814aSChristoph Hellwig const struct iomap_iter *iter) 160555f81639SShiyang Ruan { 160655f81639SShiyang Ruan vm_fault_t ret; 160755f81639SShiyang Ruan int error = 0; 160855f81639SShiyang Ruan 160965dd814aSChristoph Hellwig switch (iter->iomap.type) { 161055f81639SShiyang Ruan case IOMAP_HOLE: 161155f81639SShiyang Ruan case IOMAP_UNWRITTEN: 1612429f8de7SChristoph Hellwig clear_user_highpage(vmf->cow_page, vmf->address); 161355f81639SShiyang Ruan break; 161455f81639SShiyang Ruan case IOMAP_MAPPED: 1615429f8de7SChristoph Hellwig error = copy_cow_page_dax(vmf, iter); 161655f81639SShiyang Ruan break; 161755f81639SShiyang Ruan default: 161855f81639SShiyang Ruan WARN_ON_ONCE(1); 161955f81639SShiyang Ruan error = -EIO; 162055f81639SShiyang Ruan break; 162155f81639SShiyang Ruan } 162255f81639SShiyang Ruan 162355f81639SShiyang Ruan if (error) 162455f81639SShiyang Ruan return dax_fault_return(error); 162555f81639SShiyang Ruan 162655f81639SShiyang Ruan __SetPageUptodate(vmf->cow_page); 162755f81639SShiyang Ruan ret = finish_fault(vmf); 162855f81639SShiyang Ruan if (!ret) 162955f81639SShiyang Ruan return VM_FAULT_DONE_COW; 163055f81639SShiyang Ruan return ret; 163155f81639SShiyang Ruan } 163255f81639SShiyang Ruan 1633c2436190SShiyang Ruan /** 163465dd814aSChristoph Hellwig * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault. 1635c2436190SShiyang Ruan * @vmf: vm fault instance 163665dd814aSChristoph Hellwig * @iter: iomap iter 1637c2436190SShiyang Ruan * @pfnp: pfn to be returned 1638c2436190SShiyang Ruan * @xas: the dax mapping tree of a file 1639c2436190SShiyang Ruan * @entry: an unlocked dax entry to be inserted 1640c2436190SShiyang Ruan * @pmd: distinguish whether it is a pmd fault 1641c2436190SShiyang Ruan */ 164265dd814aSChristoph Hellwig static vm_fault_t dax_fault_iter(struct vm_fault *vmf, 164365dd814aSChristoph Hellwig const struct iomap_iter *iter, pfn_t *pfnp, 164465dd814aSChristoph Hellwig struct xa_state *xas, void **entry, bool pmd) 1645c2436190SShiyang Ruan { 164665dd814aSChristoph Hellwig const struct iomap *iomap = &iter->iomap; 1647708dfad2SShiyang Ruan const struct iomap *srcmap = iomap_iter_srcmap(iter); 1648c2436190SShiyang Ruan size_t size = pmd ? PMD_SIZE : PAGE_SIZE; 1649c2436190SShiyang Ruan loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; 1650e5d6df73SShiyang Ruan bool write = iter->flags & IOMAP_WRITE; 1651c2436190SShiyang Ruan unsigned long entry_flags = pmd ? DAX_PMD : 0; 1652c2436190SShiyang Ruan int err = 0; 1653c2436190SShiyang Ruan pfn_t pfn; 1654ff17b8dfSShiyang Ruan void *kaddr; 1655c2436190SShiyang Ruan 165665dd814aSChristoph Hellwig if (!pmd && vmf->cow_page) 165765dd814aSChristoph Hellwig return dax_fault_cow_page(vmf, iter); 165865dd814aSChristoph Hellwig 1659c2436190SShiyang Ruan /* if we are reading UNWRITTEN and HOLE, return a hole. 
*/ 1660c2436190SShiyang Ruan if (!write && 1661c2436190SShiyang Ruan (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) { 1662c2436190SShiyang Ruan if (!pmd) 1663e5d6df73SShiyang Ruan return dax_load_hole(xas, vmf, iter, entry); 1664e5d6df73SShiyang Ruan return dax_pmd_load_hole(xas, vmf, iter, entry); 1665c2436190SShiyang Ruan } 1666c2436190SShiyang Ruan 1667ff17b8dfSShiyang Ruan if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) { 1668c2436190SShiyang Ruan WARN_ON_ONCE(1); 1669c2436190SShiyang Ruan return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS; 1670c2436190SShiyang Ruan } 1671c2436190SShiyang Ruan 1672ff17b8dfSShiyang Ruan err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn); 1673c2436190SShiyang Ruan if (err) 1674c2436190SShiyang Ruan return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err); 1675c2436190SShiyang Ruan 1676e5d6df73SShiyang Ruan *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); 1677c2436190SShiyang Ruan 1678708dfad2SShiyang Ruan if (write && iomap->flags & IOMAP_F_SHARED) { 1679708dfad2SShiyang Ruan err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr); 1680ff17b8dfSShiyang Ruan if (err) 1681ff17b8dfSShiyang Ruan return dax_fault_return(err); 1682ff17b8dfSShiyang Ruan } 1683ff17b8dfSShiyang Ruan 1684e5d6df73SShiyang Ruan if (dax_fault_is_synchronous(iter, vmf->vma)) 1685c2436190SShiyang Ruan return dax_fault_synchronous_pfnp(pfnp, pfn); 1686c2436190SShiyang Ruan 1687c2436190SShiyang Ruan /* insert PMD pfn */ 1688c2436190SShiyang Ruan if (pmd) 1689c2436190SShiyang Ruan return vmf_insert_pfn_pmd(vmf, pfn, write); 1690c2436190SShiyang Ruan 1691c2436190SShiyang Ruan /* insert PTE pfn */ 1692c2436190SShiyang Ruan if (write) 1693c2436190SShiyang Ruan return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 1694c2436190SShiyang Ruan return vmf_insert_mixed(vmf->vma, vmf->address, pfn); 1695c2436190SShiyang Ruan } 1696c2436190SShiyang Ruan 1697ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, 1698c0b24625SJan Kara int *iomap_errp, const struct iomap_ops *ops) 1699a7d73fe6SChristoph Hellwig { 170065dd814aSChristoph Hellwig struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1701b15cd800SMatthew Wilcox XA_STATE(xas, &mapping->i_pages, vmf->pgoff); 170265dd814aSChristoph Hellwig struct iomap_iter iter = { 170365dd814aSChristoph Hellwig .inode = mapping->host, 170465dd814aSChristoph Hellwig .pos = (loff_t)vmf->pgoff << PAGE_SHIFT, 170565dd814aSChristoph Hellwig .len = PAGE_SIZE, 1706952da063SChristoph Hellwig .flags = IOMAP_DAX | IOMAP_FAULT, 170765dd814aSChristoph Hellwig }; 1708ab77dab4SSouptick Joarder vm_fault_t ret = 0; 1709a7d73fe6SChristoph Hellwig void *entry; 171065dd814aSChristoph Hellwig int error; 1711a7d73fe6SChristoph Hellwig 171265dd814aSChristoph Hellwig trace_dax_pte_fault(iter.inode, vmf, ret); 1713a7d73fe6SChristoph Hellwig /* 1714a7d73fe6SChristoph Hellwig * Check whether offset isn't beyond end of file now. Caller is supposed 1715a7d73fe6SChristoph Hellwig * to hold locks serializing us with truncate / punch hole so this is 1716a7d73fe6SChristoph Hellwig * a reliable test. 
1717a7d73fe6SChristoph Hellwig */ 171865dd814aSChristoph Hellwig if (iter.pos >= i_size_read(iter.inode)) { 1719ab77dab4SSouptick Joarder ret = VM_FAULT_SIGBUS; 1720a9c42b33SRoss Zwisler goto out; 1721a9c42b33SRoss Zwisler } 1722a7d73fe6SChristoph Hellwig 172365dd814aSChristoph Hellwig if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) 172465dd814aSChristoph Hellwig iter.flags |= IOMAP_WRITE; 1725a7d73fe6SChristoph Hellwig 1726b15cd800SMatthew Wilcox entry = grab_mapping_entry(&xas, mapping, 0); 1727b15cd800SMatthew Wilcox if (xa_is_internal(entry)) { 1728b15cd800SMatthew Wilcox ret = xa_to_internal(entry); 172913e451fdSJan Kara goto out; 173013e451fdSJan Kara } 173113e451fdSJan Kara 1732a7d73fe6SChristoph Hellwig /* 1733e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1734e2093926SRoss Zwisler * mappings, that we have raced with a PMD fault that overlaps with 1735e2093926SRoss Zwisler * the PTE we need to set up. If so just return and the fault will be 1736e2093926SRoss Zwisler * retried. 1737e2093926SRoss Zwisler */ 1738e2093926SRoss Zwisler if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { 1739ab77dab4SSouptick Joarder ret = VM_FAULT_NOPAGE; 1740e2093926SRoss Zwisler goto unlock_entry; 1741e2093926SRoss Zwisler } 1742e2093926SRoss Zwisler 174365dd814aSChristoph Hellwig while ((error = iomap_iter(&iter, ops)) > 0) { 174465dd814aSChristoph Hellwig if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) { 174565dd814aSChristoph Hellwig iter.processed = -EIO; /* fs corruption? */ 174665dd814aSChristoph Hellwig continue; 174765dd814aSChristoph Hellwig } 174865dd814aSChristoph Hellwig 174965dd814aSChristoph Hellwig ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); 175065dd814aSChristoph Hellwig if (ret != VM_FAULT_SIGBUS && 175165dd814aSChristoph Hellwig (iter.iomap.flags & IOMAP_F_NEW)) { 175265dd814aSChristoph Hellwig count_vm_event(PGMAJFAULT); 175365dd814aSChristoph Hellwig count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); 175465dd814aSChristoph Hellwig ret |= VM_FAULT_MAJOR; 175565dd814aSChristoph Hellwig } 175665dd814aSChristoph Hellwig 175765dd814aSChristoph Hellwig if (!(ret & VM_FAULT_ERROR)) 175865dd814aSChristoph Hellwig iter.processed = PAGE_SIZE; 175965dd814aSChristoph Hellwig } 176065dd814aSChristoph Hellwig 1761c0b24625SJan Kara if (iomap_errp) 1762c0b24625SJan Kara *iomap_errp = error; 176365dd814aSChristoph Hellwig if (!ret && error) 1764ab77dab4SSouptick Joarder ret = dax_fault_return(error); 1765a7d73fe6SChristoph Hellwig 176613e451fdSJan Kara unlock_entry: 1767b15cd800SMatthew Wilcox dax_unlock_entry(&xas, entry); 1768a9c42b33SRoss Zwisler out: 176965dd814aSChristoph Hellwig trace_dax_pte_fault_done(iter.inode, vmf, ret); 177065dd814aSChristoph Hellwig return ret; 1771a7d73fe6SChristoph Hellwig } 1772642261acSRoss Zwisler 1773642261acSRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 177455f81639SShiyang Ruan static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, 177555f81639SShiyang Ruan pgoff_t max_pgoff) 1776642261acSRoss Zwisler { 1777d8a849e1SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1778d8a849e1SDave Jiang bool write = vmf->flags & FAULT_FLAG_WRITE; 1779282a8e03SRoss Zwisler 1780fffa281bSRoss Zwisler /* 1781fffa281bSRoss Zwisler * Make sure that the faulting address's PMD offset (color) matches 1782fffa281bSRoss Zwisler * the PMD offset from the start of the file. 
This is necessary so 1783fffa281bSRoss Zwisler * that a PMD range in the page table overlaps exactly with a PMD 1784a77d19f4SMatthew Wilcox * range in the page cache. 1785fffa281bSRoss Zwisler */ 1786fffa281bSRoss Zwisler if ((vmf->pgoff & PG_PMD_COLOUR) != 1787fffa281bSRoss Zwisler ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) 178855f81639SShiyang Ruan return true; 1789fffa281bSRoss Zwisler 1790642261acSRoss Zwisler /* Fall back to PTEs if we're going to COW */ 179155f81639SShiyang Ruan if (write && !(vmf->vma->vm_flags & VM_SHARED)) 179255f81639SShiyang Ruan return true; 1793642261acSRoss Zwisler 1794642261acSRoss Zwisler /* If the PMD would extend outside the VMA */ 179555f81639SShiyang Ruan if (pmd_addr < vmf->vma->vm_start) 179655f81639SShiyang Ruan return true; 179755f81639SShiyang Ruan if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end) 179855f81639SShiyang Ruan return true; 179955f81639SShiyang Ruan 180055f81639SShiyang Ruan /* If the PMD would extend beyond the file size */ 180155f81639SShiyang Ruan if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff) 180255f81639SShiyang Ruan return true; 180355f81639SShiyang Ruan 180455f81639SShiyang Ruan return false; 180555f81639SShiyang Ruan } 180655f81639SShiyang Ruan 1807642261acSRoss Zwisler static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 1808642261acSRoss Zwisler const struct iomap_ops *ops) 1809642261acSRoss Zwisler { 181065dd814aSChristoph Hellwig struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1811642261acSRoss Zwisler XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); 181265dd814aSChristoph Hellwig struct iomap_iter iter = { 181365dd814aSChristoph Hellwig .inode = mapping->host, 181465dd814aSChristoph Hellwig .len = PMD_SIZE, 1815952da063SChristoph Hellwig .flags = IOMAP_DAX | IOMAP_FAULT, 181665dd814aSChristoph Hellwig }; 1817c2436190SShiyang Ruan vm_fault_t ret = VM_FAULT_FALLBACK; 1818642261acSRoss Zwisler pgoff_t max_pgoff; 1819642261acSRoss Zwisler void *entry; 1820642261acSRoss Zwisler 182165dd814aSChristoph Hellwig if (vmf->flags & FAULT_FLAG_WRITE) 182265dd814aSChristoph Hellwig iter.flags |= IOMAP_WRITE; 182365dd814aSChristoph Hellwig 1824642261acSRoss Zwisler /* 1825642261acSRoss Zwisler * Check whether offset isn't beyond end of file now. Caller is 1826642261acSRoss Zwisler * supposed to hold locks serializing us with truncate / punch hole so 1827642261acSRoss Zwisler * this is a reliable test. 1828642261acSRoss Zwisler */ 182965dd814aSChristoph Hellwig max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE); 1830642261acSRoss Zwisler 183165dd814aSChristoph Hellwig trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); 1832642261acSRoss Zwisler 1833b15cd800SMatthew Wilcox if (xas.xa_index >= max_pgoff) { 1834c2436190SShiyang Ruan ret = VM_FAULT_SIGBUS; 1835282a8e03SRoss Zwisler goto out; 1836282a8e03SRoss Zwisler } 1837642261acSRoss Zwisler 183855f81639SShiyang Ruan if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) 1839642261acSRoss Zwisler goto fallback; 1840642261acSRoss Zwisler 1841642261acSRoss Zwisler /* 1842b15cd800SMatthew Wilcox * grab_mapping_entry() will make sure we get an empty PMD entry, 1843b15cd800SMatthew Wilcox * a zero PMD entry or a DAX PMD. If it can't (because a PTE 1844b15cd800SMatthew Wilcox * entry is already in the array, for instance), it will return 1845b15cd800SMatthew Wilcox * VM_FAULT_FALLBACK. 
18469f141d6eSJan Kara */ 184723c84eb7SMatthew Wilcox (Oracle) entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); 1848b15cd800SMatthew Wilcox if (xa_is_internal(entry)) { 1849c2436190SShiyang Ruan ret = xa_to_internal(entry); 1850876f2946SRoss Zwisler goto fallback; 1851b15cd800SMatthew Wilcox } 1852876f2946SRoss Zwisler 1853876f2946SRoss Zwisler /* 1854e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1855e2093926SRoss Zwisler * mappings, that we have raced with a PTE fault that overlaps with 1856e2093926SRoss Zwisler * the PMD we need to set up. If so just return and the fault will be 1857e2093926SRoss Zwisler * retried. 1858e2093926SRoss Zwisler */ 1859e2093926SRoss Zwisler if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && 1860e2093926SRoss Zwisler !pmd_devmap(*vmf->pmd)) { 1861c2436190SShiyang Ruan ret = 0; 1862e2093926SRoss Zwisler goto unlock_entry; 1863e2093926SRoss Zwisler } 1864e2093926SRoss Zwisler 186565dd814aSChristoph Hellwig iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT; 1866dd0c6425SColin Ian King while (iomap_iter(&iter, ops) > 0) { 186765dd814aSChristoph Hellwig if (iomap_length(&iter) < PMD_SIZE) 186865dd814aSChristoph Hellwig continue; /* actually breaks out of the loop */ 1869876f2946SRoss Zwisler 187065dd814aSChristoph Hellwig ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); 187165dd814aSChristoph Hellwig if (ret != VM_FAULT_FALLBACK) 187265dd814aSChristoph Hellwig iter.processed = PMD_SIZE; 1873caa51d26SJan Kara } 1874caa51d26SJan Kara 1875876f2946SRoss Zwisler unlock_entry: 1876b15cd800SMatthew Wilcox dax_unlock_entry(&xas, entry); 1877642261acSRoss Zwisler fallback: 1878c2436190SShiyang Ruan if (ret == VM_FAULT_FALLBACK) { 187965dd814aSChristoph Hellwig split_huge_pmd(vmf->vma, vmf->pmd, vmf->address); 1880642261acSRoss Zwisler count_vm_event(THP_FAULT_FALLBACK); 1881642261acSRoss Zwisler } 1882282a8e03SRoss Zwisler out: 188365dd814aSChristoph Hellwig trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret); 1884c2436190SShiyang Ruan return ret; 1885642261acSRoss Zwisler } 1886a2d58167SDave Jiang #else 1887ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 188801cddfe9SArnd Bergmann const struct iomap_ops *ops) 1889a2d58167SDave Jiang { 1890a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1891a2d58167SDave Jiang } 1892642261acSRoss Zwisler #endif /* CONFIG_FS_DAX_PMD */ 1893a2d58167SDave Jiang 1894a2d58167SDave Jiang /** 1895a2d58167SDave Jiang * dax_iomap_fault - handle a page fault on a DAX file 1896a2d58167SDave Jiang * @vmf: The description of the fault 1897*1d024e7aSMatthew Wilcox (Oracle) * @order: Order of the page to fault in 18989a0dd422SJan Kara * @pfnp: PFN to insert for synchronous faults if fsync is required 1899c0b24625SJan Kara * @iomap_errp: Storage for detailed error code in case of error 1900cec04e8cSJan Kara * @ops: Iomap ops passed from the file system 1901a2d58167SDave Jiang * 1902a2d58167SDave Jiang * When a page fault occurs, filesystems may call this helper in 1903a2d58167SDave Jiang * their fault handler for DAX files. dax_iomap_fault() assumes the caller 1904a2d58167SDave Jiang * has done all the necessary locking for page fault to proceed 1905a2d58167SDave Jiang * successfully. 
1906a2d58167SDave Jiang */ 1907*1d024e7aSMatthew Wilcox (Oracle) vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order, 1908c0b24625SJan Kara pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) 1909a2d58167SDave Jiang { 1910*1d024e7aSMatthew Wilcox (Oracle) if (order == 0) 1911c0b24625SJan Kara return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); 1912*1d024e7aSMatthew Wilcox (Oracle) else if (order == PMD_ORDER) 19139a0dd422SJan Kara return dax_iomap_pmd_fault(vmf, pfnp, ops); 1914*1d024e7aSMatthew Wilcox (Oracle) else 1915a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1916a2d58167SDave Jiang } 1917a2d58167SDave Jiang EXPORT_SYMBOL_GPL(dax_iomap_fault); 191871eab6dfSJan Kara 1919a77d19f4SMatthew Wilcox /* 192071eab6dfSJan Kara * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables 192171eab6dfSJan Kara * @vmf: The description of the fault 192271eab6dfSJan Kara * @pfn: PFN to insert 1923cfc93c6cSMatthew Wilcox * @order: Order of entry to insert. 192471eab6dfSJan Kara * 1925a77d19f4SMatthew Wilcox * This function inserts a writeable PTE or PMD entry into the page tables 1926a77d19f4SMatthew Wilcox * for an mmaped DAX file. It also marks the page cache entry as dirty. 192771eab6dfSJan Kara */ 1928cfc93c6cSMatthew Wilcox static vm_fault_t 1929cfc93c6cSMatthew Wilcox dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) 193071eab6dfSJan Kara { 193171eab6dfSJan Kara struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1932cfc93c6cSMatthew Wilcox XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); 1933cfc93c6cSMatthew Wilcox void *entry; 1934ab77dab4SSouptick Joarder vm_fault_t ret; 193571eab6dfSJan Kara 1936cfc93c6cSMatthew Wilcox xas_lock_irq(&xas); 193723c84eb7SMatthew Wilcox (Oracle) entry = get_unlocked_entry(&xas, order); 193871eab6dfSJan Kara /* Did we race with someone splitting entry or so? 
*/ 193923c84eb7SMatthew Wilcox (Oracle) if (!entry || dax_is_conflict(entry) || 194023c84eb7SMatthew Wilcox (Oracle) (order == 0 && !dax_is_pte_entry(entry))) { 19414c3d043dSVivek Goyal put_unlocked_entry(&xas, entry, WAKE_NEXT); 1942cfc93c6cSMatthew Wilcox xas_unlock_irq(&xas); 194371eab6dfSJan Kara trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, 194471eab6dfSJan Kara VM_FAULT_NOPAGE); 194571eab6dfSJan Kara return VM_FAULT_NOPAGE; 194671eab6dfSJan Kara } 1947cfc93c6cSMatthew Wilcox xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); 1948cfc93c6cSMatthew Wilcox dax_lock_entry(&xas, entry); 1949cfc93c6cSMatthew Wilcox xas_unlock_irq(&xas); 1950cfc93c6cSMatthew Wilcox if (order == 0) 1951ab77dab4SSouptick Joarder ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 195271eab6dfSJan Kara #ifdef CONFIG_FS_DAX_PMD 1953cfc93c6cSMatthew Wilcox else if (order == PMD_ORDER) 1954fce86ff5SDan Williams ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); 195571eab6dfSJan Kara #endif 1956cfc93c6cSMatthew Wilcox else 1957ab77dab4SSouptick Joarder ret = VM_FAULT_FALLBACK; 1958cfc93c6cSMatthew Wilcox dax_unlock_entry(&xas, entry); 1959ab77dab4SSouptick Joarder trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); 1960ab77dab4SSouptick Joarder return ret; 196171eab6dfSJan Kara } 196271eab6dfSJan Kara 196371eab6dfSJan Kara /** 196471eab6dfSJan Kara * dax_finish_sync_fault - finish synchronous page fault 196571eab6dfSJan Kara * @vmf: The description of the fault 1966*1d024e7aSMatthew Wilcox (Oracle) * @order: Order of entry to be inserted 196771eab6dfSJan Kara * @pfn: PFN to insert 196871eab6dfSJan Kara * 196971eab6dfSJan Kara * This function ensures that the file range touched by the page fault is 197071eab6dfSJan Kara * stored persistently on the media and handles inserting of appropriate page 197171eab6dfSJan Kara * table entry. 
197271eab6dfSJan Kara */ 1973*1d024e7aSMatthew Wilcox (Oracle) vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order, 1974*1d024e7aSMatthew Wilcox (Oracle) pfn_t pfn) 197571eab6dfSJan Kara { 197671eab6dfSJan Kara int err; 197771eab6dfSJan Kara loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; 1978cfc93c6cSMatthew Wilcox size_t len = PAGE_SIZE << order; 197971eab6dfSJan Kara 198071eab6dfSJan Kara err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); 198171eab6dfSJan Kara if (err) 198271eab6dfSJan Kara return VM_FAULT_SIGBUS; 1983cfc93c6cSMatthew Wilcox return dax_insert_pfn_mkwrite(vmf, pfn, order); 198471eab6dfSJan Kara } 198571eab6dfSJan Kara EXPORT_SYMBOL_GPL(dax_finish_sync_fault); 19866f7db389SShiyang Ruan 19876f7db389SShiyang Ruan static loff_t dax_range_compare_iter(struct iomap_iter *it_src, 19886f7db389SShiyang Ruan struct iomap_iter *it_dest, u64 len, bool *same) 19896f7db389SShiyang Ruan { 19906f7db389SShiyang Ruan const struct iomap *smap = &it_src->iomap; 19916f7db389SShiyang Ruan const struct iomap *dmap = &it_dest->iomap; 19926f7db389SShiyang Ruan loff_t pos1 = it_src->pos, pos2 = it_dest->pos; 19936f7db389SShiyang Ruan void *saddr, *daddr; 19946f7db389SShiyang Ruan int id, ret; 19956f7db389SShiyang Ruan 19966f7db389SShiyang Ruan len = min(len, min(smap->length, dmap->length)); 19976f7db389SShiyang Ruan 19986f7db389SShiyang Ruan if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) { 19996f7db389SShiyang Ruan *same = true; 20006f7db389SShiyang Ruan return len; 20016f7db389SShiyang Ruan } 20026f7db389SShiyang Ruan 20036f7db389SShiyang Ruan if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) { 20046f7db389SShiyang Ruan *same = false; 20056f7db389SShiyang Ruan return 0; 20066f7db389SShiyang Ruan } 20076f7db389SShiyang Ruan 20086f7db389SShiyang Ruan id = dax_read_lock(); 20096f7db389SShiyang Ruan ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE), 20106f7db389SShiyang Ruan &saddr, NULL); 20116f7db389SShiyang Ruan if (ret < 0) 20126f7db389SShiyang Ruan goto out_unlock; 20136f7db389SShiyang Ruan 20146f7db389SShiyang Ruan ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE), 20156f7db389SShiyang Ruan &daddr, NULL); 20166f7db389SShiyang Ruan if (ret < 0) 20176f7db389SShiyang Ruan goto out_unlock; 20186f7db389SShiyang Ruan 20196f7db389SShiyang Ruan *same = !memcmp(saddr, daddr, len); 20206f7db389SShiyang Ruan if (!*same) 20216f7db389SShiyang Ruan len = 0; 20226f7db389SShiyang Ruan dax_read_unlock(id); 20236f7db389SShiyang Ruan return len; 20246f7db389SShiyang Ruan 20256f7db389SShiyang Ruan out_unlock: 20266f7db389SShiyang Ruan dax_read_unlock(id); 20276f7db389SShiyang Ruan return -EIO; 20286f7db389SShiyang Ruan } 20296f7db389SShiyang Ruan 20306f7db389SShiyang Ruan int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, 20316f7db389SShiyang Ruan struct inode *dst, loff_t dstoff, loff_t len, bool *same, 20326f7db389SShiyang Ruan const struct iomap_ops *ops) 20336f7db389SShiyang Ruan { 20346f7db389SShiyang Ruan struct iomap_iter src_iter = { 20356f7db389SShiyang Ruan .inode = src, 20366f7db389SShiyang Ruan .pos = srcoff, 20376f7db389SShiyang Ruan .len = len, 20386f7db389SShiyang Ruan .flags = IOMAP_DAX, 20396f7db389SShiyang Ruan }; 20406f7db389SShiyang Ruan struct iomap_iter dst_iter = { 20416f7db389SShiyang Ruan .inode = dst, 20426f7db389SShiyang Ruan .pos = dstoff, 20436f7db389SShiyang Ruan .len = len, 20446f7db389SShiyang Ruan .flags = IOMAP_DAX, 20456f7db389SShiyang Ruan }; 
20460e79e373SShiyang Ruan int ret, compared = 0; 20476f7db389SShiyang Ruan 20480e79e373SShiyang Ruan while ((ret = iomap_iter(&src_iter, ops)) > 0 && 20490e79e373SShiyang Ruan (ret = iomap_iter(&dst_iter, ops)) > 0) { 2050e900ba10SShiyang Ruan compared = dax_range_compare_iter(&src_iter, &dst_iter, 2051e900ba10SShiyang Ruan min(src_iter.len, dst_iter.len), same); 20520e79e373SShiyang Ruan if (compared < 0) 20530e79e373SShiyang Ruan return ret; 20540e79e373SShiyang Ruan src_iter.processed = dst_iter.processed = compared; 20556f7db389SShiyang Ruan } 20566f7db389SShiyang Ruan return ret; 20576f7db389SShiyang Ruan } 20586f7db389SShiyang Ruan 20596f7db389SShiyang Ruan int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in, 20606f7db389SShiyang Ruan struct file *file_out, loff_t pos_out, 20616f7db389SShiyang Ruan loff_t *len, unsigned int remap_flags, 20626f7db389SShiyang Ruan const struct iomap_ops *ops) 20636f7db389SShiyang Ruan { 20646f7db389SShiyang Ruan return __generic_remap_file_range_prep(file_in, pos_in, file_out, 20656f7db389SShiyang Ruan pos_out, len, remap_flags, ops); 20666f7db389SShiyang Ruan } 20676f7db389SShiyang Ruan EXPORT_SYMBOL_GPL(dax_remap_file_range_prep); 2068
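/*
 * Usage sketch (illustrative only, not part of the code above):
 * filesystems typically wire dax_iomap_fault() and
 * dax_finish_sync_fault() into their fault handlers roughly as below.
 * "myfs_iomap_ops" and "myfs_huge_fault" are hypothetical names
 * standing in for the filesystem's own iomap_ops and handler; the
 * caller is expected to hold whatever locks serialize it against
 * truncate.  On VM_FAULT_NEEDDSYNC (a synchronous fault on a MAP_SYNC
 * mapping), dax_finish_sync_fault() flushes the range and only then
 * installs the PTE/PMD.
 *
 *	static vm_fault_t myfs_huge_fault(struct vm_fault *vmf,
 *					  unsigned int order)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, order, &pfn, NULL,
 *				      &myfs_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, order, pfn);
 *		return ret;
 *	}
 */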