// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	pmd_downgrade = false;
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}

/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start to till end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
	phys_addr_t paddr = iomap->addr + (pos & PAGE_MASK) - iomap->offset;

	if (iomap->bdev)
		paddr += (get_start_sect(iomap->bdev) << SECTOR_SHIFT);
	return PHYS_PFN(paddr);
}

static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
	void *vto, *kaddr;
	long rc;
	int id;

	id = dax_read_lock();
	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(vmf->cow_page);
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * follow_invalidate_pte() will use the range to call
		 * mmu_notifier_invalidate_range_start() on our behalf before
		 * taking any lock.
		 */
		if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
					  &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_invalidate(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count;
	long ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);

	dax_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	int id, rc;
	long length;

	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); 1106c2436190SShiyang Ruan spin_unlock(ptl); 1107c2436190SShiyang Ruan trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); 1108c2436190SShiyang Ruan return VM_FAULT_NOPAGE; 1109c2436190SShiyang Ruan 1110c2436190SShiyang Ruan fallback: 1111c2436190SShiyang Ruan if (pgtable) 1112c2436190SShiyang Ruan pte_free(vma->vm_mm, pgtable); 1113c2436190SShiyang Ruan trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); 1114c2436190SShiyang Ruan return VM_FAULT_FALLBACK; 1115c2436190SShiyang Ruan } 1116c2436190SShiyang Ruan #else 1117c2436190SShiyang Ruan static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, 111865dd814aSChristoph Hellwig const struct iomap *iomap, void **entry) 1119c2436190SShiyang Ruan { 1120c2436190SShiyang Ruan return VM_FAULT_FALLBACK; 1121c2436190SShiyang Ruan } 1122c2436190SShiyang Ruan #endif /* CONFIG_FS_DAX_PMD */ 1123c2436190SShiyang Ruan 1124e5c71954SChristoph Hellwig static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff, 1125e5c71954SChristoph Hellwig unsigned int offset, size_t size) 1126e5c71954SChristoph Hellwig { 1127e5c71954SChristoph Hellwig void *kaddr; 1128e5c71954SChristoph Hellwig long ret; 1129e5c71954SChristoph Hellwig 1130e5c71954SChristoph Hellwig ret = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL); 1131e5c71954SChristoph Hellwig if (ret > 0) { 1132e5c71954SChristoph Hellwig memset(kaddr + offset, 0, size); 1133e5c71954SChristoph Hellwig dax_flush(dax_dev, kaddr + offset, size); 1134e5c71954SChristoph Hellwig } 1135e5c71954SChristoph Hellwig return ret; 1136e5c71954SChristoph Hellwig } 1137e5c71954SChristoph Hellwig 1138c6f40468SChristoph Hellwig static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero) 1139679c8bd3SChristoph Hellwig { 1140c6f40468SChristoph Hellwig const struct iomap *iomap = &iter->iomap; 1141c6f40468SChristoph Hellwig const struct iomap *srcmap = iomap_iter_srcmap(iter); 1142c6f40468SChristoph Hellwig loff_t pos = iter->pos; 1143c6f40468SChristoph Hellwig u64 length = iomap_length(iter); 1144c6f40468SChristoph Hellwig s64 written = 0; 1145c6f40468SChristoph Hellwig 1146c6f40468SChristoph Hellwig /* already zeroed? we're done. 
*/ 1147c6f40468SChristoph Hellwig if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) 1148c6f40468SChristoph Hellwig return length; 1149c6f40468SChristoph Hellwig 1150c6f40468SChristoph Hellwig do { 115181ee8e52SMatthew Wilcox (Oracle) unsigned offset = offset_in_page(pos); 115281ee8e52SMatthew Wilcox (Oracle) unsigned size = min_t(u64, PAGE_SIZE - offset, length); 1153c6f40468SChristoph Hellwig pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1154c6f40468SChristoph Hellwig long rc; 1155c6f40468SChristoph Hellwig int id; 11560a23f9ffSVivek Goyal 1157cccbce67SDan Williams id = dax_read_lock(); 1158e5c71954SChristoph Hellwig if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE) 115981ee8e52SMatthew Wilcox (Oracle) rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1); 11600a23f9ffSVivek Goyal else 1161e5c71954SChristoph Hellwig rc = dax_memzero(iomap->dax_dev, pgoff, offset, size); 1162cccbce67SDan Williams dax_read_unlock(id); 11630a23f9ffSVivek Goyal 1164e5c71954SChristoph Hellwig if (rc < 0) 1165e5c71954SChristoph Hellwig return rc; 1166c6f40468SChristoph Hellwig pos += size; 1167c6f40468SChristoph Hellwig length -= size; 1168c6f40468SChristoph Hellwig written += size; 1169c6f40468SChristoph Hellwig if (did_zero) 1170c6f40468SChristoph Hellwig *did_zero = true; 1171c6f40468SChristoph Hellwig } while (length > 0); 1172c6f40468SChristoph Hellwig 1173c6f40468SChristoph Hellwig return written; 1174679c8bd3SChristoph Hellwig } 1175679c8bd3SChristoph Hellwig 1176c6f40468SChristoph Hellwig int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, 1177c6f40468SChristoph Hellwig const struct iomap_ops *ops) 1178c6f40468SChristoph Hellwig { 1179c6f40468SChristoph Hellwig struct iomap_iter iter = { 1180c6f40468SChristoph Hellwig .inode = inode, 1181c6f40468SChristoph Hellwig .pos = pos, 1182c6f40468SChristoph Hellwig .len = len, 1183*952da063SChristoph Hellwig .flags = IOMAP_DAX | IOMAP_ZERO, 1184c6f40468SChristoph Hellwig }; 1185c6f40468SChristoph Hellwig int ret; 1186c6f40468SChristoph Hellwig 1187c6f40468SChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0) 1188c6f40468SChristoph Hellwig iter.processed = dax_zero_iter(&iter, did_zero); 1189c6f40468SChristoph Hellwig return ret; 1190c6f40468SChristoph Hellwig } 1191c6f40468SChristoph Hellwig EXPORT_SYMBOL_GPL(dax_zero_range); 1192c6f40468SChristoph Hellwig 1193c6f40468SChristoph Hellwig int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 1194c6f40468SChristoph Hellwig const struct iomap_ops *ops) 1195c6f40468SChristoph Hellwig { 1196c6f40468SChristoph Hellwig unsigned int blocksize = i_blocksize(inode); 1197c6f40468SChristoph Hellwig unsigned int off = pos & (blocksize - 1); 1198c6f40468SChristoph Hellwig 1199c6f40468SChristoph Hellwig /* Block boundary? 
Nothing to do */ 1200c6f40468SChristoph Hellwig if (!off) 1201c6f40468SChristoph Hellwig return 0; 1202c6f40468SChristoph Hellwig return dax_zero_range(inode, pos, blocksize - off, did_zero, ops); 1203c6f40468SChristoph Hellwig } 1204c6f40468SChristoph Hellwig EXPORT_SYMBOL_GPL(dax_truncate_page); 1205c6f40468SChristoph Hellwig 1206ca289e0bSChristoph Hellwig static loff_t dax_iomap_iter(const struct iomap_iter *iomi, 1207ca289e0bSChristoph Hellwig struct iov_iter *iter) 1208a254e568SChristoph Hellwig { 1209ca289e0bSChristoph Hellwig const struct iomap *iomap = &iomi->iomap; 1210ca289e0bSChristoph Hellwig loff_t length = iomap_length(iomi); 1211ca289e0bSChristoph Hellwig loff_t pos = iomi->pos; 1212cccbce67SDan Williams struct dax_device *dax_dev = iomap->dax_dev; 1213a254e568SChristoph Hellwig loff_t end = pos + length, done = 0; 1214a254e568SChristoph Hellwig ssize_t ret = 0; 1215a77d4786SDan Williams size_t xfer; 1216cccbce67SDan Williams int id; 1217a254e568SChristoph Hellwig 1218a254e568SChristoph Hellwig if (iov_iter_rw(iter) == READ) { 1219ca289e0bSChristoph Hellwig end = min(end, i_size_read(iomi->inode)); 1220a254e568SChristoph Hellwig if (pos >= end) 1221a254e568SChristoph Hellwig return 0; 1222a254e568SChristoph Hellwig 1223a254e568SChristoph Hellwig if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) 1224a254e568SChristoph Hellwig return iov_iter_zero(min(length, end - pos), iter); 1225a254e568SChristoph Hellwig } 1226a254e568SChristoph Hellwig 1227a254e568SChristoph Hellwig if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) 1228a254e568SChristoph Hellwig return -EIO; 1229a254e568SChristoph Hellwig 1230e3fce68cSJan Kara /* 1231e3fce68cSJan Kara * Write can allocate block for an area which has a hole page mapped 1232e3fce68cSJan Kara * into page tables. We have to tear down these mappings so that data 1233e3fce68cSJan Kara * written by write(2) is visible in mmap. 
1234e3fce68cSJan Kara */
1235cd656375SJan Kara if (iomap->flags & IOMAP_F_NEW) {
1236ca289e0bSChristoph Hellwig invalidate_inode_pages2_range(iomi->inode->i_mapping,
1237e3fce68cSJan Kara pos >> PAGE_SHIFT,
1238e3fce68cSJan Kara (end - 1) >> PAGE_SHIFT);
1239e3fce68cSJan Kara }
1240e3fce68cSJan Kara 
1241cccbce67SDan Williams id = dax_read_lock();
1242a254e568SChristoph Hellwig while (pos < end) {
1243a254e568SChristoph Hellwig unsigned offset = pos & (PAGE_SIZE - 1);
1244cccbce67SDan Williams const size_t size = ALIGN(length + offset, PAGE_SIZE);
124560696eb2SChristoph Hellwig pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1246a254e568SChristoph Hellwig ssize_t map_len;
1247cccbce67SDan Williams void *kaddr;
1248a254e568SChristoph Hellwig 
1249d1908f52SMichal Hocko if (fatal_signal_pending(current)) {
1250d1908f52SMichal Hocko ret = -EINTR;
1251d1908f52SMichal Hocko break;
1252d1908f52SMichal Hocko }
1253d1908f52SMichal Hocko 
1254cccbce67SDan Williams map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
125586ed913bSHuaisheng Ye &kaddr, NULL);
1256a254e568SChristoph Hellwig if (map_len < 0) {
1257a254e568SChristoph Hellwig ret = map_len;
1258a254e568SChristoph Hellwig break;
1259a254e568SChristoph Hellwig }
1260a254e568SChristoph Hellwig 
1261cccbce67SDan Williams map_len = PFN_PHYS(map_len);
1262cccbce67SDan Williams kaddr += offset;
1263a254e568SChristoph Hellwig map_len -= offset;
1264a254e568SChristoph Hellwig if (map_len > end - pos)
1265a254e568SChristoph Hellwig map_len = end - pos;
1266a254e568SChristoph Hellwig 
1267a2e050f5SRoss Zwisler /*
1268a2e050f5SRoss Zwisler * The userspace address for the memory copy has already been
1269a2e050f5SRoss Zwisler * validated via access_ok() in either vfs_read() or
1270a2e050f5SRoss Zwisler * vfs_write(), depending on which operation we are doing.
1271a2e050f5SRoss Zwisler */
1272a254e568SChristoph Hellwig if (iov_iter_rw(iter) == WRITE)
1273a77d4786SDan Williams xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1274fec53774SDan Williams map_len, iter);
1275a254e568SChristoph Hellwig else
1276a77d4786SDan Williams xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1277b3a9a0c3SDan Williams map_len, iter);
1278a254e568SChristoph Hellwig 
1279a77d4786SDan Williams pos += xfer;
1280a77d4786SDan Williams length -= xfer;
1281a77d4786SDan Williams done += xfer;
1282a77d4786SDan Williams 
1283a77d4786SDan Williams if (xfer == 0)
1284a77d4786SDan Williams ret = -EFAULT;
1285a77d4786SDan Williams if (xfer < map_len)
1286a77d4786SDan Williams break;
1287a254e568SChristoph Hellwig }
1288cccbce67SDan Williams dax_read_unlock(id);
1289a254e568SChristoph Hellwig 
1290a254e568SChristoph Hellwig return done ? done : ret;
1291a254e568SChristoph Hellwig }
1292a254e568SChristoph Hellwig 
1293a254e568SChristoph Hellwig /**
129411c59c92SRoss Zwisler * dax_iomap_rw - Perform I/O to a DAX file
1295a254e568SChristoph Hellwig * @iocb: The control block for this I/O
1296a254e568SChristoph Hellwig * @iter: The addresses to do I/O from or to
1297a254e568SChristoph Hellwig * @ops: iomap ops passed from the file system
1298a254e568SChristoph Hellwig *
1299a254e568SChristoph Hellwig * This function performs read and write operations to directly mapped
1300a254e568SChristoph Hellwig * persistent memory. The caller needs to take care of read/write exclusion
1301a254e568SChristoph Hellwig * and evicting any page cache pages in the region under I/O.
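 *
 * A minimal sketch of a read-side caller, with example_iomap_ops standing in
 * for the filesystem's own iomap_ops (callers hold i_rwsem, shared for reads,
 * as the lockdep assertions below expect):
 *
 *	inode_lock_shared(inode);
 *	ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *	inode_unlock_shared(inode);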
1302a254e568SChristoph Hellwig */
1303a254e568SChristoph Hellwig ssize_t
130411c59c92SRoss Zwisler dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
13058ff6daa1SChristoph Hellwig const struct iomap_ops *ops)
1306a254e568SChristoph Hellwig {
1307ca289e0bSChristoph Hellwig struct iomap_iter iomi = {
1308ca289e0bSChristoph Hellwig .inode = iocb->ki_filp->f_mapping->host,
1309ca289e0bSChristoph Hellwig .pos = iocb->ki_pos,
1310ca289e0bSChristoph Hellwig .len = iov_iter_count(iter),
1311*952da063SChristoph Hellwig .flags = IOMAP_DAX,
1312ca289e0bSChristoph Hellwig };
1313ca289e0bSChristoph Hellwig loff_t done = 0;
1314ca289e0bSChristoph Hellwig int ret;
1315a254e568SChristoph Hellwig 
1316168316dbSChristoph Hellwig if (iov_iter_rw(iter) == WRITE) {
1317ca289e0bSChristoph Hellwig lockdep_assert_held_write(&iomi.inode->i_rwsem);
1318ca289e0bSChristoph Hellwig iomi.flags |= IOMAP_WRITE;
1319168316dbSChristoph Hellwig } else {
1320ca289e0bSChristoph Hellwig lockdep_assert_held(&iomi.inode->i_rwsem);
1321168316dbSChristoph Hellwig }
1322a254e568SChristoph Hellwig 
132396222d53SJeff Moyer if (iocb->ki_flags & IOCB_NOWAIT)
1324ca289e0bSChristoph Hellwig iomi.flags |= IOMAP_NOWAIT;
132596222d53SJeff Moyer 
1326ca289e0bSChristoph Hellwig while ((ret = iomap_iter(&iomi, ops)) > 0)
1327ca289e0bSChristoph Hellwig iomi.processed = dax_iomap_iter(&iomi, iter);
1328a254e568SChristoph Hellwig 
1329ca289e0bSChristoph Hellwig done = iomi.pos - iocb->ki_pos;
1330ca289e0bSChristoph Hellwig iocb->ki_pos = iomi.pos;
1331a254e568SChristoph Hellwig return done ? done : ret;
1332a254e568SChristoph Hellwig }
133311c59c92SRoss Zwisler EXPORT_SYMBOL_GPL(dax_iomap_rw);
1334a7d73fe6SChristoph Hellwig 
1335ab77dab4SSouptick Joarder static vm_fault_t dax_fault_return(int error)
13369f141d6eSJan Kara {
13379f141d6eSJan Kara if (error == 0)
13389f141d6eSJan Kara return VM_FAULT_NOPAGE;
1339c9aed74eSSouptick Joarder return vmf_error(error);
13409f141d6eSJan Kara }
13419f141d6eSJan Kara 
1342aaa422c4SDan Williams /*
1343aaa422c4SDan Williams * MAP_SYNC on a dax mapping guarantees dirty metadata is
1344aaa422c4SDan Williams * flushed on write-faults (non-cow), but not read-faults.
1345aaa422c4SDan Williams */
1346aaa422c4SDan Williams static bool dax_fault_is_synchronous(unsigned long flags,
134765dd814aSChristoph Hellwig struct vm_area_struct *vma, const struct iomap *iomap)
1348aaa422c4SDan Williams {
1349aaa422c4SDan Williams return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1350aaa422c4SDan Williams && (iomap->flags & IOMAP_F_DIRTY);
1351aaa422c4SDan Williams }
1352aaa422c4SDan Williams 
135355f81639SShiyang Ruan /*
135455f81639SShiyang Ruan * When handling a synchronous page fault and the inode needs an fsync, we can
135555f81639SShiyang Ruan * insert the PTE/PMD into page tables only after that fsync happened. Skip
135655f81639SShiyang Ruan * insertion for now and return the pfn so that the caller can insert it after
135755f81639SShiyang Ruan * the fsync is done.
135855f81639SShiyang Ruan */ 135955f81639SShiyang Ruan static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn) 136055f81639SShiyang Ruan { 136155f81639SShiyang Ruan if (WARN_ON_ONCE(!pfnp)) 136255f81639SShiyang Ruan return VM_FAULT_SIGBUS; 136355f81639SShiyang Ruan *pfnp = pfn; 136455f81639SShiyang Ruan return VM_FAULT_NEEDDSYNC; 136555f81639SShiyang Ruan } 136655f81639SShiyang Ruan 136765dd814aSChristoph Hellwig static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, 136865dd814aSChristoph Hellwig const struct iomap_iter *iter) 136955f81639SShiyang Ruan { 137055f81639SShiyang Ruan vm_fault_t ret; 137155f81639SShiyang Ruan int error = 0; 137255f81639SShiyang Ruan 137365dd814aSChristoph Hellwig switch (iter->iomap.type) { 137455f81639SShiyang Ruan case IOMAP_HOLE: 137555f81639SShiyang Ruan case IOMAP_UNWRITTEN: 1376429f8de7SChristoph Hellwig clear_user_highpage(vmf->cow_page, vmf->address); 137755f81639SShiyang Ruan break; 137855f81639SShiyang Ruan case IOMAP_MAPPED: 1379429f8de7SChristoph Hellwig error = copy_cow_page_dax(vmf, iter); 138055f81639SShiyang Ruan break; 138155f81639SShiyang Ruan default: 138255f81639SShiyang Ruan WARN_ON_ONCE(1); 138355f81639SShiyang Ruan error = -EIO; 138455f81639SShiyang Ruan break; 138555f81639SShiyang Ruan } 138655f81639SShiyang Ruan 138755f81639SShiyang Ruan if (error) 138855f81639SShiyang Ruan return dax_fault_return(error); 138955f81639SShiyang Ruan 139055f81639SShiyang Ruan __SetPageUptodate(vmf->cow_page); 139155f81639SShiyang Ruan ret = finish_fault(vmf); 139255f81639SShiyang Ruan if (!ret) 139355f81639SShiyang Ruan return VM_FAULT_DONE_COW; 139455f81639SShiyang Ruan return ret; 139555f81639SShiyang Ruan } 139655f81639SShiyang Ruan 1397c2436190SShiyang Ruan /** 139865dd814aSChristoph Hellwig * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault. 1399c2436190SShiyang Ruan * @vmf: vm fault instance 140065dd814aSChristoph Hellwig * @iter: iomap iter 1401c2436190SShiyang Ruan * @pfnp: pfn to be returned 1402c2436190SShiyang Ruan * @xas: the dax mapping tree of a file 1403c2436190SShiyang Ruan * @entry: an unlocked dax entry to be inserted 1404c2436190SShiyang Ruan * @pmd: distinguish whether it is a pmd fault 1405c2436190SShiyang Ruan */ 140665dd814aSChristoph Hellwig static vm_fault_t dax_fault_iter(struct vm_fault *vmf, 140765dd814aSChristoph Hellwig const struct iomap_iter *iter, pfn_t *pfnp, 140865dd814aSChristoph Hellwig struct xa_state *xas, void **entry, bool pmd) 1409c2436190SShiyang Ruan { 1410c2436190SShiyang Ruan struct address_space *mapping = vmf->vma->vm_file->f_mapping; 141165dd814aSChristoph Hellwig const struct iomap *iomap = &iter->iomap; 1412c2436190SShiyang Ruan size_t size = pmd ? PMD_SIZE : PAGE_SIZE; 1413c2436190SShiyang Ruan loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; 1414c2436190SShiyang Ruan bool write = vmf->flags & FAULT_FLAG_WRITE; 141565dd814aSChristoph Hellwig bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap); 1416c2436190SShiyang Ruan unsigned long entry_flags = pmd ? DAX_PMD : 0; 1417c2436190SShiyang Ruan int err = 0; 1418c2436190SShiyang Ruan pfn_t pfn; 1419c2436190SShiyang Ruan 142065dd814aSChristoph Hellwig if (!pmd && vmf->cow_page) 142165dd814aSChristoph Hellwig return dax_fault_cow_page(vmf, iter); 142265dd814aSChristoph Hellwig 1423c2436190SShiyang Ruan /* if we are reading UNWRITTEN and HOLE, return a hole. 
*/ 1424c2436190SShiyang Ruan if (!write && 1425c2436190SShiyang Ruan (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) { 1426c2436190SShiyang Ruan if (!pmd) 1427c2436190SShiyang Ruan return dax_load_hole(xas, mapping, entry, vmf); 1428c2436190SShiyang Ruan return dax_pmd_load_hole(xas, vmf, iomap, entry); 1429c2436190SShiyang Ruan } 1430c2436190SShiyang Ruan 1431c2436190SShiyang Ruan if (iomap->type != IOMAP_MAPPED) { 1432c2436190SShiyang Ruan WARN_ON_ONCE(1); 1433c2436190SShiyang Ruan return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS; 1434c2436190SShiyang Ruan } 1435c2436190SShiyang Ruan 143665dd814aSChristoph Hellwig err = dax_iomap_pfn(&iter->iomap, pos, size, &pfn); 1437c2436190SShiyang Ruan if (err) 1438c2436190SShiyang Ruan return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err); 1439c2436190SShiyang Ruan 1440c2436190SShiyang Ruan *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags, 1441c2436190SShiyang Ruan write && !sync); 1442c2436190SShiyang Ruan 1443c2436190SShiyang Ruan if (sync) 1444c2436190SShiyang Ruan return dax_fault_synchronous_pfnp(pfnp, pfn); 1445c2436190SShiyang Ruan 1446c2436190SShiyang Ruan /* insert PMD pfn */ 1447c2436190SShiyang Ruan if (pmd) 1448c2436190SShiyang Ruan return vmf_insert_pfn_pmd(vmf, pfn, write); 1449c2436190SShiyang Ruan 1450c2436190SShiyang Ruan /* insert PTE pfn */ 1451c2436190SShiyang Ruan if (write) 1452c2436190SShiyang Ruan return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 1453c2436190SShiyang Ruan return vmf_insert_mixed(vmf->vma, vmf->address, pfn); 1454c2436190SShiyang Ruan } 1455c2436190SShiyang Ruan 1456ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, 1457c0b24625SJan Kara int *iomap_errp, const struct iomap_ops *ops) 1458a7d73fe6SChristoph Hellwig { 145965dd814aSChristoph Hellwig struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1460b15cd800SMatthew Wilcox XA_STATE(xas, &mapping->i_pages, vmf->pgoff); 146165dd814aSChristoph Hellwig struct iomap_iter iter = { 146265dd814aSChristoph Hellwig .inode = mapping->host, 146365dd814aSChristoph Hellwig .pos = (loff_t)vmf->pgoff << PAGE_SHIFT, 146465dd814aSChristoph Hellwig .len = PAGE_SIZE, 1465*952da063SChristoph Hellwig .flags = IOMAP_DAX | IOMAP_FAULT, 146665dd814aSChristoph Hellwig }; 1467ab77dab4SSouptick Joarder vm_fault_t ret = 0; 1468a7d73fe6SChristoph Hellwig void *entry; 146965dd814aSChristoph Hellwig int error; 1470a7d73fe6SChristoph Hellwig 147165dd814aSChristoph Hellwig trace_dax_pte_fault(iter.inode, vmf, ret); 1472a7d73fe6SChristoph Hellwig /* 1473a7d73fe6SChristoph Hellwig * Check whether offset isn't beyond end of file now. Caller is supposed 1474a7d73fe6SChristoph Hellwig * to hold locks serializing us with truncate / punch hole so this is 1475a7d73fe6SChristoph Hellwig * a reliable test. 
1476a7d73fe6SChristoph Hellwig */ 147765dd814aSChristoph Hellwig if (iter.pos >= i_size_read(iter.inode)) { 1478ab77dab4SSouptick Joarder ret = VM_FAULT_SIGBUS; 1479a9c42b33SRoss Zwisler goto out; 1480a9c42b33SRoss Zwisler } 1481a7d73fe6SChristoph Hellwig 148265dd814aSChristoph Hellwig if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) 148365dd814aSChristoph Hellwig iter.flags |= IOMAP_WRITE; 1484a7d73fe6SChristoph Hellwig 1485b15cd800SMatthew Wilcox entry = grab_mapping_entry(&xas, mapping, 0); 1486b15cd800SMatthew Wilcox if (xa_is_internal(entry)) { 1487b15cd800SMatthew Wilcox ret = xa_to_internal(entry); 148813e451fdSJan Kara goto out; 148913e451fdSJan Kara } 149013e451fdSJan Kara 1491a7d73fe6SChristoph Hellwig /* 1492e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1493e2093926SRoss Zwisler * mappings, that we have raced with a PMD fault that overlaps with 1494e2093926SRoss Zwisler * the PTE we need to set up. If so just return and the fault will be 1495e2093926SRoss Zwisler * retried. 1496e2093926SRoss Zwisler */ 1497e2093926SRoss Zwisler if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { 1498ab77dab4SSouptick Joarder ret = VM_FAULT_NOPAGE; 1499e2093926SRoss Zwisler goto unlock_entry; 1500e2093926SRoss Zwisler } 1501e2093926SRoss Zwisler 150265dd814aSChristoph Hellwig while ((error = iomap_iter(&iter, ops)) > 0) { 150365dd814aSChristoph Hellwig if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) { 150465dd814aSChristoph Hellwig iter.processed = -EIO; /* fs corruption? */ 150565dd814aSChristoph Hellwig continue; 150665dd814aSChristoph Hellwig } 150765dd814aSChristoph Hellwig 150865dd814aSChristoph Hellwig ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); 150965dd814aSChristoph Hellwig if (ret != VM_FAULT_SIGBUS && 151065dd814aSChristoph Hellwig (iter.iomap.flags & IOMAP_F_NEW)) { 151165dd814aSChristoph Hellwig count_vm_event(PGMAJFAULT); 151265dd814aSChristoph Hellwig count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); 151365dd814aSChristoph Hellwig ret |= VM_FAULT_MAJOR; 151465dd814aSChristoph Hellwig } 151565dd814aSChristoph Hellwig 151665dd814aSChristoph Hellwig if (!(ret & VM_FAULT_ERROR)) 151765dd814aSChristoph Hellwig iter.processed = PAGE_SIZE; 151865dd814aSChristoph Hellwig } 151965dd814aSChristoph Hellwig 1520c0b24625SJan Kara if (iomap_errp) 1521c0b24625SJan Kara *iomap_errp = error; 152265dd814aSChristoph Hellwig if (!ret && error) 1523ab77dab4SSouptick Joarder ret = dax_fault_return(error); 1524a7d73fe6SChristoph Hellwig 152513e451fdSJan Kara unlock_entry: 1526b15cd800SMatthew Wilcox dax_unlock_entry(&xas, entry); 1527a9c42b33SRoss Zwisler out: 152865dd814aSChristoph Hellwig trace_dax_pte_fault_done(iter.inode, vmf, ret); 152965dd814aSChristoph Hellwig return ret; 1530a7d73fe6SChristoph Hellwig } 1531642261acSRoss Zwisler 1532642261acSRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 153355f81639SShiyang Ruan static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, 153455f81639SShiyang Ruan pgoff_t max_pgoff) 1535642261acSRoss Zwisler { 1536d8a849e1SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1537d8a849e1SDave Jiang bool write = vmf->flags & FAULT_FLAG_WRITE; 1538282a8e03SRoss Zwisler 1539fffa281bSRoss Zwisler /* 1540fffa281bSRoss Zwisler * Make sure that the faulting address's PMD offset (color) matches 1541fffa281bSRoss Zwisler * the PMD offset from the start of the file. 
This is necessary so 1542fffa281bSRoss Zwisler * that a PMD range in the page table overlaps exactly with a PMD 1543a77d19f4SMatthew Wilcox * range in the page cache. 1544fffa281bSRoss Zwisler */ 1545fffa281bSRoss Zwisler if ((vmf->pgoff & PG_PMD_COLOUR) != 1546fffa281bSRoss Zwisler ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) 154755f81639SShiyang Ruan return true; 1548fffa281bSRoss Zwisler 1549642261acSRoss Zwisler /* Fall back to PTEs if we're going to COW */ 155055f81639SShiyang Ruan if (write && !(vmf->vma->vm_flags & VM_SHARED)) 155155f81639SShiyang Ruan return true; 1552642261acSRoss Zwisler 1553642261acSRoss Zwisler /* If the PMD would extend outside the VMA */ 155455f81639SShiyang Ruan if (pmd_addr < vmf->vma->vm_start) 155555f81639SShiyang Ruan return true; 155655f81639SShiyang Ruan if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end) 155755f81639SShiyang Ruan return true; 155855f81639SShiyang Ruan 155955f81639SShiyang Ruan /* If the PMD would extend beyond the file size */ 156055f81639SShiyang Ruan if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff) 156155f81639SShiyang Ruan return true; 156255f81639SShiyang Ruan 156355f81639SShiyang Ruan return false; 156455f81639SShiyang Ruan } 156555f81639SShiyang Ruan 1566642261acSRoss Zwisler static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 1567642261acSRoss Zwisler const struct iomap_ops *ops) 1568642261acSRoss Zwisler { 156965dd814aSChristoph Hellwig struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1570642261acSRoss Zwisler XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); 157165dd814aSChristoph Hellwig struct iomap_iter iter = { 157265dd814aSChristoph Hellwig .inode = mapping->host, 157365dd814aSChristoph Hellwig .len = PMD_SIZE, 1574*952da063SChristoph Hellwig .flags = IOMAP_DAX | IOMAP_FAULT, 157565dd814aSChristoph Hellwig }; 1576c2436190SShiyang Ruan vm_fault_t ret = VM_FAULT_FALLBACK; 1577642261acSRoss Zwisler pgoff_t max_pgoff; 1578642261acSRoss Zwisler void *entry; 1579642261acSRoss Zwisler int error; 1580642261acSRoss Zwisler 158165dd814aSChristoph Hellwig if (vmf->flags & FAULT_FLAG_WRITE) 158265dd814aSChristoph Hellwig iter.flags |= IOMAP_WRITE; 158365dd814aSChristoph Hellwig 1584642261acSRoss Zwisler /* 1585642261acSRoss Zwisler * Check whether offset isn't beyond end of file now. Caller is 1586642261acSRoss Zwisler * supposed to hold locks serializing us with truncate / punch hole so 1587642261acSRoss Zwisler * this is a reliable test. 1588642261acSRoss Zwisler */ 158965dd814aSChristoph Hellwig max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE); 1590642261acSRoss Zwisler 159165dd814aSChristoph Hellwig trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); 1592642261acSRoss Zwisler 1593b15cd800SMatthew Wilcox if (xas.xa_index >= max_pgoff) { 1594c2436190SShiyang Ruan ret = VM_FAULT_SIGBUS; 1595282a8e03SRoss Zwisler goto out; 1596282a8e03SRoss Zwisler } 1597642261acSRoss Zwisler 159855f81639SShiyang Ruan if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) 1599642261acSRoss Zwisler goto fallback; 1600642261acSRoss Zwisler 1601642261acSRoss Zwisler /* 1602b15cd800SMatthew Wilcox * grab_mapping_entry() will make sure we get an empty PMD entry, 1603b15cd800SMatthew Wilcox * a zero PMD entry or a DAX PMD. If it can't (because a PTE 1604b15cd800SMatthew Wilcox * entry is already in the array, for instance), it will return 1605b15cd800SMatthew Wilcox * VM_FAULT_FALLBACK. 
16069f141d6eSJan Kara */ 160723c84eb7SMatthew Wilcox (Oracle) entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); 1608b15cd800SMatthew Wilcox if (xa_is_internal(entry)) { 1609c2436190SShiyang Ruan ret = xa_to_internal(entry); 1610876f2946SRoss Zwisler goto fallback; 1611b15cd800SMatthew Wilcox } 1612876f2946SRoss Zwisler 1613876f2946SRoss Zwisler /* 1614e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1615e2093926SRoss Zwisler * mappings, that we have raced with a PTE fault that overlaps with 1616e2093926SRoss Zwisler * the PMD we need to set up. If so just return and the fault will be 1617e2093926SRoss Zwisler * retried. 1618e2093926SRoss Zwisler */ 1619e2093926SRoss Zwisler if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && 1620e2093926SRoss Zwisler !pmd_devmap(*vmf->pmd)) { 1621c2436190SShiyang Ruan ret = 0; 1622e2093926SRoss Zwisler goto unlock_entry; 1623e2093926SRoss Zwisler } 1624e2093926SRoss Zwisler 162565dd814aSChristoph Hellwig iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT; 162665dd814aSChristoph Hellwig while ((error = iomap_iter(&iter, ops)) > 0) { 162765dd814aSChristoph Hellwig if (iomap_length(&iter) < PMD_SIZE) 162865dd814aSChristoph Hellwig continue; /* actually breaks out of the loop */ 1629876f2946SRoss Zwisler 163065dd814aSChristoph Hellwig ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); 163165dd814aSChristoph Hellwig if (ret != VM_FAULT_FALLBACK) 163265dd814aSChristoph Hellwig iter.processed = PMD_SIZE; 1633caa51d26SJan Kara } 1634caa51d26SJan Kara 1635876f2946SRoss Zwisler unlock_entry: 1636b15cd800SMatthew Wilcox dax_unlock_entry(&xas, entry); 1637642261acSRoss Zwisler fallback: 1638c2436190SShiyang Ruan if (ret == VM_FAULT_FALLBACK) { 163965dd814aSChristoph Hellwig split_huge_pmd(vmf->vma, vmf->pmd, vmf->address); 1640642261acSRoss Zwisler count_vm_event(THP_FAULT_FALLBACK); 1641642261acSRoss Zwisler } 1642282a8e03SRoss Zwisler out: 164365dd814aSChristoph Hellwig trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret); 1644c2436190SShiyang Ruan return ret; 1645642261acSRoss Zwisler } 1646a2d58167SDave Jiang #else 1647ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 164801cddfe9SArnd Bergmann const struct iomap_ops *ops) 1649a2d58167SDave Jiang { 1650a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1651a2d58167SDave Jiang } 1652642261acSRoss Zwisler #endif /* CONFIG_FS_DAX_PMD */ 1653a2d58167SDave Jiang 1654a2d58167SDave Jiang /** 1655a2d58167SDave Jiang * dax_iomap_fault - handle a page fault on a DAX file 1656a2d58167SDave Jiang * @vmf: The description of the fault 1657cec04e8cSJan Kara * @pe_size: Size of the page to fault in 16589a0dd422SJan Kara * @pfnp: PFN to insert for synchronous faults if fsync is required 1659c0b24625SJan Kara * @iomap_errp: Storage for detailed error code in case of error 1660cec04e8cSJan Kara * @ops: Iomap ops passed from the file system 1661a2d58167SDave Jiang * 1662a2d58167SDave Jiang * When a page fault occurs, filesystems may call this helper in 1663a2d58167SDave Jiang * their fault handler for DAX files. dax_iomap_fault() assumes the caller 1664a2d58167SDave Jiang * has done all the necessary locking for page fault to proceed 1665a2d58167SDave Jiang * successfully. 
1666a2d58167SDave Jiang */ 1667ab77dab4SSouptick Joarder vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 1668c0b24625SJan Kara pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) 1669a2d58167SDave Jiang { 1670c791ace1SDave Jiang switch (pe_size) { 1671c791ace1SDave Jiang case PE_SIZE_PTE: 1672c0b24625SJan Kara return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); 1673c791ace1SDave Jiang case PE_SIZE_PMD: 16749a0dd422SJan Kara return dax_iomap_pmd_fault(vmf, pfnp, ops); 1675a2d58167SDave Jiang default: 1676a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1677a2d58167SDave Jiang } 1678a2d58167SDave Jiang } 1679a2d58167SDave Jiang EXPORT_SYMBOL_GPL(dax_iomap_fault); 168071eab6dfSJan Kara 1681a77d19f4SMatthew Wilcox /* 168271eab6dfSJan Kara * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables 168371eab6dfSJan Kara * @vmf: The description of the fault 168471eab6dfSJan Kara * @pfn: PFN to insert 1685cfc93c6cSMatthew Wilcox * @order: Order of entry to insert. 168671eab6dfSJan Kara * 1687a77d19f4SMatthew Wilcox * This function inserts a writeable PTE or PMD entry into the page tables 1688a77d19f4SMatthew Wilcox * for an mmaped DAX file. It also marks the page cache entry as dirty. 168971eab6dfSJan Kara */ 1690cfc93c6cSMatthew Wilcox static vm_fault_t 1691cfc93c6cSMatthew Wilcox dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) 169271eab6dfSJan Kara { 169371eab6dfSJan Kara struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1694cfc93c6cSMatthew Wilcox XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); 1695cfc93c6cSMatthew Wilcox void *entry; 1696ab77dab4SSouptick Joarder vm_fault_t ret; 169771eab6dfSJan Kara 1698cfc93c6cSMatthew Wilcox xas_lock_irq(&xas); 169923c84eb7SMatthew Wilcox (Oracle) entry = get_unlocked_entry(&xas, order); 170071eab6dfSJan Kara /* Did we race with someone splitting entry or so? 
*/ 170123c84eb7SMatthew Wilcox (Oracle) if (!entry || dax_is_conflict(entry) || 170223c84eb7SMatthew Wilcox (Oracle) (order == 0 && !dax_is_pte_entry(entry))) { 17034c3d043dSVivek Goyal put_unlocked_entry(&xas, entry, WAKE_NEXT); 1704cfc93c6cSMatthew Wilcox xas_unlock_irq(&xas); 170571eab6dfSJan Kara trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, 170671eab6dfSJan Kara VM_FAULT_NOPAGE); 170771eab6dfSJan Kara return VM_FAULT_NOPAGE; 170871eab6dfSJan Kara } 1709cfc93c6cSMatthew Wilcox xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); 1710cfc93c6cSMatthew Wilcox dax_lock_entry(&xas, entry); 1711cfc93c6cSMatthew Wilcox xas_unlock_irq(&xas); 1712cfc93c6cSMatthew Wilcox if (order == 0) 1713ab77dab4SSouptick Joarder ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 171471eab6dfSJan Kara #ifdef CONFIG_FS_DAX_PMD 1715cfc93c6cSMatthew Wilcox else if (order == PMD_ORDER) 1716fce86ff5SDan Williams ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); 171771eab6dfSJan Kara #endif 1718cfc93c6cSMatthew Wilcox else 1719ab77dab4SSouptick Joarder ret = VM_FAULT_FALLBACK; 1720cfc93c6cSMatthew Wilcox dax_unlock_entry(&xas, entry); 1721ab77dab4SSouptick Joarder trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); 1722ab77dab4SSouptick Joarder return ret; 172371eab6dfSJan Kara } 172471eab6dfSJan Kara 172571eab6dfSJan Kara /** 172671eab6dfSJan Kara * dax_finish_sync_fault - finish synchronous page fault 172771eab6dfSJan Kara * @vmf: The description of the fault 172871eab6dfSJan Kara * @pe_size: Size of entry to be inserted 172971eab6dfSJan Kara * @pfn: PFN to insert 173071eab6dfSJan Kara * 173171eab6dfSJan Kara * This function ensures that the file range touched by the page fault is 173271eab6dfSJan Kara * stored persistently on the media and handles inserting of appropriate page 173371eab6dfSJan Kara * table entry. 173471eab6dfSJan Kara */ 1735ab77dab4SSouptick Joarder vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, 1736ab77dab4SSouptick Joarder enum page_entry_size pe_size, pfn_t pfn) 173771eab6dfSJan Kara { 173871eab6dfSJan Kara int err; 173971eab6dfSJan Kara loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; 1740cfc93c6cSMatthew Wilcox unsigned int order = pe_order(pe_size); 1741cfc93c6cSMatthew Wilcox size_t len = PAGE_SIZE << order; 174271eab6dfSJan Kara 174371eab6dfSJan Kara err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); 174471eab6dfSJan Kara if (err) 174571eab6dfSJan Kara return VM_FAULT_SIGBUS; 1746cfc93c6cSMatthew Wilcox return dax_insert_pfn_mkwrite(vmf, pfn, order); 174771eab6dfSJan Kara } 174871eab6dfSJan Kara EXPORT_SYMBOL_GPL(dax_finish_sync_fault); 1749
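/*
 * A condensed sketch of how a filesystem fault handler is expected to combine
 * dax_iomap_fault() and dax_finish_sync_fault() for synchronous (MAP_SYNC)
 * faults.  example_iomap_ops stands in for the filesystem's own iomap_ops,
 * and real callers (e.g. ext4, xfs) also take their own fault-path locks
 * around this sequence:
 *
 *	pfn_t pfn;
 *	vm_fault_t ret;
 *
 *	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *	return ret;
 */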