12025cf9eSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 2d475c634SMatthew Wilcox /* 3d475c634SMatthew Wilcox * fs/dax.c - Direct Access filesystem code 4d475c634SMatthew Wilcox * Copyright (c) 2013-2014 Intel Corporation 5d475c634SMatthew Wilcox * Author: Matthew Wilcox <matthew.r.wilcox@intel.com> 6d475c634SMatthew Wilcox * Author: Ross Zwisler <ross.zwisler@linux.intel.com> 7d475c634SMatthew Wilcox */ 8d475c634SMatthew Wilcox 9d475c634SMatthew Wilcox #include <linux/atomic.h> 10d475c634SMatthew Wilcox #include <linux/blkdev.h> 11d475c634SMatthew Wilcox #include <linux/buffer_head.h> 12d77e92e2SRoss Zwisler #include <linux/dax.h> 13d475c634SMatthew Wilcox #include <linux/fs.h> 14f7ca90b1SMatthew Wilcox #include <linux/highmem.h> 15f7ca90b1SMatthew Wilcox #include <linux/memcontrol.h> 16f7ca90b1SMatthew Wilcox #include <linux/mm.h> 17d475c634SMatthew Wilcox #include <linux/mutex.h> 189973c98eSRoss Zwisler #include <linux/pagevec.h> 19289c6aedSMatthew Wilcox #include <linux/sched.h> 20f361bf4aSIngo Molnar #include <linux/sched/signal.h> 21d475c634SMatthew Wilcox #include <linux/uio.h> 22f7ca90b1SMatthew Wilcox #include <linux/vmstat.h> 2334c0fd54SDan Williams #include <linux/pfn_t.h> 240e749e54SDan Williams #include <linux/sizes.h> 254b4bb46dSJan Kara #include <linux/mmu_notifier.h> 26a254e568SChristoph Hellwig #include <linux/iomap.h> 2706083a09SMuchun Song #include <linux/rmap.h> 2811cf9d86SAneesh Kumar K.V #include <asm/pgalloc.h> 29d475c634SMatthew Wilcox 30282a8e03SRoss Zwisler #define CREATE_TRACE_POINTS 31282a8e03SRoss Zwisler #include <trace/events/fs_dax.h> 32282a8e03SRoss Zwisler 33cfc93c6cSMatthew Wilcox static inline unsigned int pe_order(enum page_entry_size pe_size) 34cfc93c6cSMatthew Wilcox { 35cfc93c6cSMatthew Wilcox if (pe_size == PE_SIZE_PTE) 36cfc93c6cSMatthew Wilcox return PAGE_SHIFT - PAGE_SHIFT; 37cfc93c6cSMatthew Wilcox if (pe_size == PE_SIZE_PMD) 38cfc93c6cSMatthew Wilcox return PMD_SHIFT - PAGE_SHIFT; 39cfc93c6cSMatthew Wilcox if (pe_size == PE_SIZE_PUD) 40cfc93c6cSMatthew Wilcox return PUD_SHIFT - PAGE_SHIFT; 41cfc93c6cSMatthew Wilcox return ~0; 42cfc93c6cSMatthew Wilcox } 43cfc93c6cSMatthew Wilcox 44ac401cc7SJan Kara /* We choose 4096 entries - same as per-zone page wait tables */ 45ac401cc7SJan Kara #define DAX_WAIT_TABLE_BITS 12 46ac401cc7SJan Kara #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS) 47ac401cc7SJan Kara 48917f3452SRoss Zwisler /* The 'colour' (ie low bits) within a PMD of a page offset. */ 49917f3452SRoss Zwisler #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) 50977fbdcdSMatthew Wilcox #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT) 51917f3452SRoss Zwisler 52cfc93c6cSMatthew Wilcox /* The order of a PMD entry */ 53cfc93c6cSMatthew Wilcox #define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT) 54cfc93c6cSMatthew Wilcox 55ce95ab0fSRoss Zwisler static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES]; 56ac401cc7SJan Kara 57ac401cc7SJan Kara static int __init init_dax_wait_table(void) 58ac401cc7SJan Kara { 59ac401cc7SJan Kara int i; 60ac401cc7SJan Kara 61ac401cc7SJan Kara for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++) 62ac401cc7SJan Kara init_waitqueue_head(wait_table + i); 63ac401cc7SJan Kara return 0; 64ac401cc7SJan Kara } 65ac401cc7SJan Kara fs_initcall(init_dax_wait_table); 66ac401cc7SJan Kara 67527b19d0SRoss Zwisler /* 683159f943SMatthew Wilcox * DAX pagecache entries use XArray value entries so they can't be mistaken 693159f943SMatthew Wilcox * for pages. 
We use one bit for locking, one bit for the entry size (PMD) 703159f943SMatthew Wilcox * and two more to tell us if the entry is a zero page or an empty entry that 713159f943SMatthew Wilcox * is just used for locking. In total four special bits. 72527b19d0SRoss Zwisler * 73527b19d0SRoss Zwisler * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE 74527b19d0SRoss Zwisler * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem 75527b19d0SRoss Zwisler * block allocation. 76527b19d0SRoss Zwisler */ 773159f943SMatthew Wilcox #define DAX_SHIFT (4) 783159f943SMatthew Wilcox #define DAX_LOCKED (1UL << 0) 793159f943SMatthew Wilcox #define DAX_PMD (1UL << 1) 803159f943SMatthew Wilcox #define DAX_ZERO_PAGE (1UL << 2) 813159f943SMatthew Wilcox #define DAX_EMPTY (1UL << 3) 82527b19d0SRoss Zwisler 83a77d19f4SMatthew Wilcox static unsigned long dax_to_pfn(void *entry) 84527b19d0SRoss Zwisler { 853159f943SMatthew Wilcox return xa_to_value(entry) >> DAX_SHIFT; 86527b19d0SRoss Zwisler } 87527b19d0SRoss Zwisler 889f32d221SMatthew Wilcox static void *dax_make_entry(pfn_t pfn, unsigned long flags) 899f32d221SMatthew Wilcox { 909f32d221SMatthew Wilcox return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT)); 919f32d221SMatthew Wilcox } 929f32d221SMatthew Wilcox 93cfc93c6cSMatthew Wilcox static bool dax_is_locked(void *entry) 94cfc93c6cSMatthew Wilcox { 95cfc93c6cSMatthew Wilcox return xa_to_value(entry) & DAX_LOCKED; 96cfc93c6cSMatthew Wilcox } 97cfc93c6cSMatthew Wilcox 98a77d19f4SMatthew Wilcox static unsigned int dax_entry_order(void *entry) 99527b19d0SRoss Zwisler { 1003159f943SMatthew Wilcox if (xa_to_value(entry) & DAX_PMD) 101cfc93c6cSMatthew Wilcox return PMD_ORDER; 102527b19d0SRoss Zwisler return 0; 103527b19d0SRoss Zwisler } 104527b19d0SRoss Zwisler 105fda490d3SMatthew Wilcox static unsigned long dax_is_pmd_entry(void *entry) 106642261acSRoss Zwisler { 1073159f943SMatthew Wilcox return xa_to_value(entry) & DAX_PMD; 108642261acSRoss Zwisler } 109642261acSRoss Zwisler 110fda490d3SMatthew Wilcox static bool dax_is_pte_entry(void *entry) 111642261acSRoss Zwisler { 1123159f943SMatthew Wilcox return !(xa_to_value(entry) & DAX_PMD); 113642261acSRoss Zwisler } 114642261acSRoss Zwisler 115642261acSRoss Zwisler static int dax_is_zero_entry(void *entry) 116642261acSRoss Zwisler { 1173159f943SMatthew Wilcox return xa_to_value(entry) & DAX_ZERO_PAGE; 118642261acSRoss Zwisler } 119642261acSRoss Zwisler 120642261acSRoss Zwisler static int dax_is_empty_entry(void *entry) 121642261acSRoss Zwisler { 1223159f943SMatthew Wilcox return xa_to_value(entry) & DAX_EMPTY; 123642261acSRoss Zwisler } 124642261acSRoss Zwisler 125f7ca90b1SMatthew Wilcox /* 12623c84eb7SMatthew Wilcox (Oracle) * true if the entry that was found is of a smaller order than the entry 12723c84eb7SMatthew Wilcox (Oracle) * we were looking for 12823c84eb7SMatthew Wilcox (Oracle) */ 12923c84eb7SMatthew Wilcox (Oracle) static bool dax_is_conflict(void *entry) 13023c84eb7SMatthew Wilcox (Oracle) { 13123c84eb7SMatthew Wilcox (Oracle) return entry == XA_RETRY_ENTRY; 13223c84eb7SMatthew Wilcox (Oracle) } 13323c84eb7SMatthew Wilcox (Oracle) 13423c84eb7SMatthew Wilcox (Oracle) /* 135a77d19f4SMatthew Wilcox * DAX page cache entry locking 136ac401cc7SJan Kara */ 137ac401cc7SJan Kara struct exceptional_entry_key { 138ec4907ffSMatthew Wilcox struct xarray *xa; 13963e95b5cSRoss Zwisler pgoff_t entry_start; 140ac401cc7SJan Kara }; 141ac401cc7SJan Kara 142ac401cc7SJan Kara struct wait_exceptional_entry_queue { 
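	/* embedded waiter; wake_exceptional_entry_func() filters wakeups by @key */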
143ac6424b9SIngo Molnar wait_queue_entry_t wait; 144ac401cc7SJan Kara struct exceptional_entry_key key; 145ac401cc7SJan Kara }; 146ac401cc7SJan Kara 147698ab77aSVivek Goyal /** 148698ab77aSVivek Goyal * enum dax_wake_mode: waitqueue wakeup behaviour 149698ab77aSVivek Goyal * @WAKE_ALL: wake all waiters in the waitqueue 150698ab77aSVivek Goyal * @WAKE_NEXT: wake only the first waiter in the waitqueue 151698ab77aSVivek Goyal */ 152698ab77aSVivek Goyal enum dax_wake_mode { 153698ab77aSVivek Goyal WAKE_ALL, 154698ab77aSVivek Goyal WAKE_NEXT, 155698ab77aSVivek Goyal }; 156698ab77aSVivek Goyal 157b15cd800SMatthew Wilcox static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas, 158b15cd800SMatthew Wilcox void *entry, struct exceptional_entry_key *key) 15963e95b5cSRoss Zwisler { 16063e95b5cSRoss Zwisler unsigned long hash; 161b15cd800SMatthew Wilcox unsigned long index = xas->xa_index; 16263e95b5cSRoss Zwisler 16363e95b5cSRoss Zwisler /* 16463e95b5cSRoss Zwisler * If 'entry' is a PMD, align the 'index' that we use for the wait 16563e95b5cSRoss Zwisler * queue to the start of that PMD. This ensures that all offsets in 16663e95b5cSRoss Zwisler * the range covered by the PMD map to the same bit lock. 16763e95b5cSRoss Zwisler */ 168642261acSRoss Zwisler if (dax_is_pmd_entry(entry)) 169917f3452SRoss Zwisler index &= ~PG_PMD_COLOUR; 170b15cd800SMatthew Wilcox key->xa = xas->xa; 17163e95b5cSRoss Zwisler key->entry_start = index; 17263e95b5cSRoss Zwisler 173b15cd800SMatthew Wilcox hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS); 17463e95b5cSRoss Zwisler return wait_table + hash; 17563e95b5cSRoss Zwisler } 17663e95b5cSRoss Zwisler 177ec4907ffSMatthew Wilcox static int wake_exceptional_entry_func(wait_queue_entry_t *wait, 178ec4907ffSMatthew Wilcox unsigned int mode, int sync, void *keyp) 179ac401cc7SJan Kara { 180ac401cc7SJan Kara struct exceptional_entry_key *key = keyp; 181ac401cc7SJan Kara struct wait_exceptional_entry_queue *ewait = 182ac401cc7SJan Kara container_of(wait, struct wait_exceptional_entry_queue, wait); 183ac401cc7SJan Kara 184ec4907ffSMatthew Wilcox if (key->xa != ewait->key.xa || 18563e95b5cSRoss Zwisler key->entry_start != ewait->key.entry_start) 186ac401cc7SJan Kara return 0; 187ac401cc7SJan Kara return autoremove_wake_function(wait, mode, sync, NULL); 188ac401cc7SJan Kara } 189ac401cc7SJan Kara 190ac401cc7SJan Kara /* 191b93b0163SMatthew Wilcox * @entry may no longer be the entry at the index in the mapping. 192b93b0163SMatthew Wilcox * The important information it's conveying is whether the entry at 193b93b0163SMatthew Wilcox * this index used to be a PMD entry. 194e30331ffSRoss Zwisler */ 195698ab77aSVivek Goyal static void dax_wake_entry(struct xa_state *xas, void *entry, 196698ab77aSVivek Goyal enum dax_wake_mode mode) 197e30331ffSRoss Zwisler { 198e30331ffSRoss Zwisler struct exceptional_entry_key key; 199e30331ffSRoss Zwisler wait_queue_head_t *wq; 200e30331ffSRoss Zwisler 201b15cd800SMatthew Wilcox wq = dax_entry_waitqueue(xas, entry, &key); 202e30331ffSRoss Zwisler 203e30331ffSRoss Zwisler /* 204e30331ffSRoss Zwisler * Checking for locked entry and prepare_to_wait_exclusive() happens 205b93b0163SMatthew Wilcox * under the i_pages lock, ditto for entry handling in our callers. 206e30331ffSRoss Zwisler * So at this point all tasks that could have seen our entry locked 207e30331ffSRoss Zwisler * must be in the waitqueue and the following check will see them. 
208e30331ffSRoss Zwisler */ 209e30331ffSRoss Zwisler if (waitqueue_active(wq)) 210698ab77aSVivek Goyal __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key); 211e30331ffSRoss Zwisler } 212e30331ffSRoss Zwisler 213cfc93c6cSMatthew Wilcox /* 214cfc93c6cSMatthew Wilcox * Look up entry in page cache, wait for it to become unlocked if it 215cfc93c6cSMatthew Wilcox * is a DAX entry and return it. The caller must subsequently call 216cfc93c6cSMatthew Wilcox * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry() 21723c84eb7SMatthew Wilcox (Oracle) * if it did. The entry returned may have a larger order than @order. 21823c84eb7SMatthew Wilcox (Oracle) * If @order is larger than the order of the entry found in i_pages, this 21923c84eb7SMatthew Wilcox (Oracle) * function returns a dax_is_conflict entry. 220cfc93c6cSMatthew Wilcox * 221cfc93c6cSMatthew Wilcox * Must be called with the i_pages lock held. 222cfc93c6cSMatthew Wilcox */ 22323c84eb7SMatthew Wilcox (Oracle) static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) 224cfc93c6cSMatthew Wilcox { 225cfc93c6cSMatthew Wilcox void *entry; 226cfc93c6cSMatthew Wilcox struct wait_exceptional_entry_queue ewait; 227cfc93c6cSMatthew Wilcox wait_queue_head_t *wq; 228cfc93c6cSMatthew Wilcox 229cfc93c6cSMatthew Wilcox init_wait(&ewait.wait); 230cfc93c6cSMatthew Wilcox ewait.wait.func = wake_exceptional_entry_func; 231cfc93c6cSMatthew Wilcox 232cfc93c6cSMatthew Wilcox for (;;) { 2330e40de03SMatthew Wilcox entry = xas_find_conflict(xas); 2346370740eSDan Williams if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) 2356370740eSDan Williams return entry; 23623c84eb7SMatthew Wilcox (Oracle) if (dax_entry_order(entry) < order) 23723c84eb7SMatthew Wilcox (Oracle) return XA_RETRY_ENTRY; 2386370740eSDan Williams if (!dax_is_locked(entry)) 239cfc93c6cSMatthew Wilcox return entry; 240cfc93c6cSMatthew Wilcox 241b15cd800SMatthew Wilcox wq = dax_entry_waitqueue(xas, entry, &ewait.key); 242cfc93c6cSMatthew Wilcox prepare_to_wait_exclusive(wq, &ewait.wait, 243cfc93c6cSMatthew Wilcox TASK_UNINTERRUPTIBLE); 244cfc93c6cSMatthew Wilcox xas_unlock_irq(xas); 245cfc93c6cSMatthew Wilcox xas_reset(xas); 246cfc93c6cSMatthew Wilcox schedule(); 247cfc93c6cSMatthew Wilcox finish_wait(wq, &ewait.wait); 248cfc93c6cSMatthew Wilcox xas_lock_irq(xas); 249cfc93c6cSMatthew Wilcox } 250cfc93c6cSMatthew Wilcox } 251cfc93c6cSMatthew Wilcox 25255e56f06SMatthew Wilcox /* 25355e56f06SMatthew Wilcox * The only thing keeping the address space around is the i_pages lock 25455e56f06SMatthew Wilcox * (it's cycled in clear_inode() after removing the entries from i_pages) 25555e56f06SMatthew Wilcox * After we call xas_unlock_irq(), we cannot touch xas->xa. 25655e56f06SMatthew Wilcox */ 25755e56f06SMatthew Wilcox static void wait_entry_unlocked(struct xa_state *xas, void *entry) 25855e56f06SMatthew Wilcox { 25955e56f06SMatthew Wilcox struct wait_exceptional_entry_queue ewait; 26055e56f06SMatthew Wilcox wait_queue_head_t *wq; 26155e56f06SMatthew Wilcox 26255e56f06SMatthew Wilcox init_wait(&ewait.wait); 26355e56f06SMatthew Wilcox ewait.wait.func = wake_exceptional_entry_func; 26455e56f06SMatthew Wilcox 26555e56f06SMatthew Wilcox wq = dax_entry_waitqueue(xas, entry, &ewait.key); 266d8a70641SDan Williams /* 267d8a70641SDan Williams * Unlike get_unlocked_entry() there is no guarantee that this 268d8a70641SDan Williams * path ever successfully retrieves an unlocked entry before an 269d8a70641SDan Williams * inode dies. 
Perform a non-exclusive wait in case this path 270d8a70641SDan Williams * never successfully performs its own wake up. 271d8a70641SDan Williams */ 272d8a70641SDan Williams prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); 27355e56f06SMatthew Wilcox xas_unlock_irq(xas); 27455e56f06SMatthew Wilcox schedule(); 27555e56f06SMatthew Wilcox finish_wait(wq, &ewait.wait); 27655e56f06SMatthew Wilcox } 27755e56f06SMatthew Wilcox 2784c3d043dSVivek Goyal static void put_unlocked_entry(struct xa_state *xas, void *entry, 2794c3d043dSVivek Goyal enum dax_wake_mode mode) 280cfc93c6cSMatthew Wilcox { 28161c30c98SJan Kara if (entry && !dax_is_conflict(entry)) 2824c3d043dSVivek Goyal dax_wake_entry(xas, entry, mode); 283cfc93c6cSMatthew Wilcox } 284cfc93c6cSMatthew Wilcox 285cfc93c6cSMatthew Wilcox /* 286cfc93c6cSMatthew Wilcox * We used the xa_state to get the entry, but then we locked the entry and 287cfc93c6cSMatthew Wilcox * dropped the xa_lock, so we know the xa_state is stale and must be reset 288cfc93c6cSMatthew Wilcox * before use. 289cfc93c6cSMatthew Wilcox */ 290cfc93c6cSMatthew Wilcox static void dax_unlock_entry(struct xa_state *xas, void *entry) 291cfc93c6cSMatthew Wilcox { 292cfc93c6cSMatthew Wilcox void *old; 293cfc93c6cSMatthew Wilcox 2947ae2ea7dSMatthew Wilcox BUG_ON(dax_is_locked(entry)); 295cfc93c6cSMatthew Wilcox xas_reset(xas); 296cfc93c6cSMatthew Wilcox xas_lock_irq(xas); 297cfc93c6cSMatthew Wilcox old = xas_store(xas, entry); 298cfc93c6cSMatthew Wilcox xas_unlock_irq(xas); 299cfc93c6cSMatthew Wilcox BUG_ON(!dax_is_locked(old)); 300698ab77aSVivek Goyal dax_wake_entry(xas, entry, WAKE_NEXT); 301cfc93c6cSMatthew Wilcox } 302cfc93c6cSMatthew Wilcox 303cfc93c6cSMatthew Wilcox /* 304cfc93c6cSMatthew Wilcox * Return: The entry stored at this location before it was locked. 305cfc93c6cSMatthew Wilcox */ 306cfc93c6cSMatthew Wilcox static void *dax_lock_entry(struct xa_state *xas, void *entry) 307cfc93c6cSMatthew Wilcox { 308cfc93c6cSMatthew Wilcox unsigned long v = xa_to_value(entry); 309cfc93c6cSMatthew Wilcox return xas_store(xas, xa_mk_value(v | DAX_LOCKED)); 310cfc93c6cSMatthew Wilcox } 311cfc93c6cSMatthew Wilcox 312d2c997c0SDan Williams static unsigned long dax_entry_size(void *entry) 313d2c997c0SDan Williams { 314d2c997c0SDan Williams if (dax_is_zero_entry(entry)) 315d2c997c0SDan Williams return 0; 316d2c997c0SDan Williams else if (dax_is_empty_entry(entry)) 317d2c997c0SDan Williams return 0; 318d2c997c0SDan Williams else if (dax_is_pmd_entry(entry)) 319d2c997c0SDan Williams return PMD_SIZE; 320d2c997c0SDan Williams else 321d2c997c0SDan Williams return PAGE_SIZE; 322d2c997c0SDan Williams } 323d2c997c0SDan Williams 324a77d19f4SMatthew Wilcox static unsigned long dax_end_pfn(void *entry) 325d2c997c0SDan Williams { 326a77d19f4SMatthew Wilcox return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE; 327d2c997c0SDan Williams } 328d2c997c0SDan Williams 329d2c997c0SDan Williams /* 330d2c997c0SDan Williams * Iterate through all mapped pfns represented by an entry, i.e. skip 331d2c997c0SDan Williams * 'empty' and 'zero' entries. 
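 * A PTE entry maps a single pfn and a PMD entry maps PMD_SIZE / PAGE_SIZE
 * consecutive pfns starting at dax_to_pfn(entry); empty and zero entries
 * have size 0, so the loop body never runs for them.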
332d2c997c0SDan Williams */ 333d2c997c0SDan Williams #define for_each_mapped_pfn(entry, pfn) \ 334a77d19f4SMatthew Wilcox for (pfn = dax_to_pfn(entry); \ 335a77d19f4SMatthew Wilcox pfn < dax_end_pfn(entry); pfn++) 336d2c997c0SDan Williams 33716900426SShiyang Ruan static inline bool dax_page_is_shared(struct page *page) 3386061b69bSShiyang Ruan { 33916900426SShiyang Ruan return page->mapping == PAGE_MAPPING_DAX_SHARED; 3406061b69bSShiyang Ruan } 3416061b69bSShiyang Ruan 34273449dafSDan Williams /* 34316900426SShiyang Ruan * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the 34416900426SShiyang Ruan * refcount. 3456061b69bSShiyang Ruan */ 34616900426SShiyang Ruan static inline void dax_page_share_get(struct page *page) 3476061b69bSShiyang Ruan { 34816900426SShiyang Ruan if (page->mapping != PAGE_MAPPING_DAX_SHARED) { 3496061b69bSShiyang Ruan /* 3506061b69bSShiyang Ruan * Reset the index if the page was already mapped 3516061b69bSShiyang Ruan * regularly before. 3526061b69bSShiyang Ruan */ 3536061b69bSShiyang Ruan if (page->mapping) 35416900426SShiyang Ruan page->share = 1; 35516900426SShiyang Ruan page->mapping = PAGE_MAPPING_DAX_SHARED; 3566061b69bSShiyang Ruan } 35716900426SShiyang Ruan page->share++; 35816900426SShiyang Ruan } 35916900426SShiyang Ruan 36016900426SShiyang Ruan static inline unsigned long dax_page_share_put(struct page *page) 36116900426SShiyang Ruan { 36216900426SShiyang Ruan return --page->share; 3636061b69bSShiyang Ruan } 3646061b69bSShiyang Ruan 3656061b69bSShiyang Ruan /* 36616900426SShiyang Ruan * When it is called in dax_insert_entry(), the shared flag will indicate that 3676061b69bSShiyang Ruan * whether this entry is shared by multiple files. If so, set the page->mapping 36816900426SShiyang Ruan * PAGE_MAPPING_DAX_SHARED, and use page->share as refcount. 
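 * Once a page is marked PAGE_MAPPING_DAX_SHARED it no longer carries a
 * (mapping, index) pair, so it can no longer be mapped back to a single
 * file offset here; page->share only counts how many file mappings still
 * reference it.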
36973449dafSDan Williams */ 37073449dafSDan Williams static void dax_associate_entry(void *entry, struct address_space *mapping, 37116900426SShiyang Ruan struct vm_area_struct *vma, unsigned long address, bool shared) 372d2c997c0SDan Williams { 37373449dafSDan Williams unsigned long size = dax_entry_size(entry), pfn, index; 37473449dafSDan Williams int i = 0; 375d2c997c0SDan Williams 376d2c997c0SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 377d2c997c0SDan Williams return; 378d2c997c0SDan Williams 37973449dafSDan Williams index = linear_page_index(vma, address & ~(size - 1)); 380d2c997c0SDan Williams for_each_mapped_pfn(entry, pfn) { 381d2c997c0SDan Williams struct page *page = pfn_to_page(pfn); 382d2c997c0SDan Williams 38316900426SShiyang Ruan if (shared) { 38416900426SShiyang Ruan dax_page_share_get(page); 3856061b69bSShiyang Ruan } else { 386d2c997c0SDan Williams WARN_ON_ONCE(page->mapping); 387d2c997c0SDan Williams page->mapping = mapping; 38873449dafSDan Williams page->index = index + i++; 389d2c997c0SDan Williams } 390d2c997c0SDan Williams } 3916061b69bSShiyang Ruan } 392d2c997c0SDan Williams 393d2c997c0SDan Williams static void dax_disassociate_entry(void *entry, struct address_space *mapping, 394d2c997c0SDan Williams bool trunc) 395d2c997c0SDan Williams { 396d2c997c0SDan Williams unsigned long pfn; 397d2c997c0SDan Williams 398d2c997c0SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 399d2c997c0SDan Williams return; 400d2c997c0SDan Williams 401d2c997c0SDan Williams for_each_mapped_pfn(entry, pfn) { 402d2c997c0SDan Williams struct page *page = pfn_to_page(pfn); 403d2c997c0SDan Williams 404d2c997c0SDan Williams WARN_ON_ONCE(trunc && page_ref_count(page) > 1); 40516900426SShiyang Ruan if (dax_page_is_shared(page)) { 40616900426SShiyang Ruan /* keep the shared flag if this page is still shared */ 40716900426SShiyang Ruan if (dax_page_share_put(page) > 0) 4086061b69bSShiyang Ruan continue; 4096061b69bSShiyang Ruan } else 410d2c997c0SDan Williams WARN_ON_ONCE(page->mapping && page->mapping != mapping); 411d2c997c0SDan Williams page->mapping = NULL; 41273449dafSDan Williams page->index = 0; 413d2c997c0SDan Williams } 414d2c997c0SDan Williams } 415d2c997c0SDan Williams 4165fac7408SDan Williams static struct page *dax_busy_page(void *entry) 4175fac7408SDan Williams { 4185fac7408SDan Williams unsigned long pfn; 4195fac7408SDan Williams 4205fac7408SDan Williams for_each_mapped_pfn(entry, pfn) { 4215fac7408SDan Williams struct page *page = pfn_to_page(pfn); 4225fac7408SDan Williams 4235fac7408SDan Williams if (page_ref_count(page) > 1) 4245fac7408SDan Williams return page; 4255fac7408SDan Williams } 4265fac7408SDan Williams return NULL; 4275fac7408SDan Williams } 4285fac7408SDan Williams 429c5bbd451SMatthew Wilcox /* 430c2e8021aSShiyang Ruan * dax_lock_page - Lock the DAX entry corresponding to a page 431c5bbd451SMatthew Wilcox * @page: The page whose entry we want to lock 432c5bbd451SMatthew Wilcox * 433c5bbd451SMatthew Wilcox * Context: Process context. 43427359fd6SMatthew Wilcox * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could 43527359fd6SMatthew Wilcox * not be locked. 
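 *
 * The cookie is normally the locked entry value itself.  A minimal usage
 * sketch:
 *
 *	cookie = dax_lock_page(page);
 *	if (cookie) {
 *		... inspect the page while its entry is held locked ...
 *		dax_unlock_page(page, cookie);
 *	}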
436c5bbd451SMatthew Wilcox */ 43727359fd6SMatthew Wilcox dax_entry_t dax_lock_page(struct page *page) 438c2a7d2a1SDan Williams { 4399f32d221SMatthew Wilcox XA_STATE(xas, NULL, 0); 4409f32d221SMatthew Wilcox void *entry; 441c2a7d2a1SDan Williams 442c5bbd451SMatthew Wilcox /* Ensure page->mapping isn't freed while we look at it */ 443c5bbd451SMatthew Wilcox rcu_read_lock(); 444c2a7d2a1SDan Williams for (;;) { 4459f32d221SMatthew Wilcox struct address_space *mapping = READ_ONCE(page->mapping); 446c2a7d2a1SDan Williams 44727359fd6SMatthew Wilcox entry = NULL; 448c93db7bbSMatthew Wilcox if (!mapping || !dax_mapping(mapping)) 449c5bbd451SMatthew Wilcox break; 450c2a7d2a1SDan Williams 451c2a7d2a1SDan Williams /* 452c2a7d2a1SDan Williams * In the device-dax case there's no need to lock, a 453c2a7d2a1SDan Williams * struct dev_pagemap pin is sufficient to keep the 454c2a7d2a1SDan Williams * inode alive, and we assume we have dev_pagemap pin 455c2a7d2a1SDan Williams * otherwise we would not have a valid pfn_to_page() 456c2a7d2a1SDan Williams * translation. 457c2a7d2a1SDan Williams */ 45827359fd6SMatthew Wilcox entry = (void *)~0UL; 4599f32d221SMatthew Wilcox if (S_ISCHR(mapping->host->i_mode)) 460c5bbd451SMatthew Wilcox break; 461c2a7d2a1SDan Williams 4629f32d221SMatthew Wilcox xas.xa = &mapping->i_pages; 4639f32d221SMatthew Wilcox xas_lock_irq(&xas); 464c2a7d2a1SDan Williams if (mapping != page->mapping) { 4659f32d221SMatthew Wilcox xas_unlock_irq(&xas); 466c2a7d2a1SDan Williams continue; 467c2a7d2a1SDan Williams } 4689f32d221SMatthew Wilcox xas_set(&xas, page->index); 4699f32d221SMatthew Wilcox entry = xas_load(&xas); 4709f32d221SMatthew Wilcox if (dax_is_locked(entry)) { 471c5bbd451SMatthew Wilcox rcu_read_unlock(); 47255e56f06SMatthew Wilcox wait_entry_unlocked(&xas, entry); 473c5bbd451SMatthew Wilcox rcu_read_lock(); 474c2a7d2a1SDan Williams continue; 475c2a7d2a1SDan Williams } 4769f32d221SMatthew Wilcox dax_lock_entry(&xas, entry); 4779f32d221SMatthew Wilcox xas_unlock_irq(&xas); 478c5bbd451SMatthew Wilcox break; 4799f32d221SMatthew Wilcox } 480c5bbd451SMatthew Wilcox rcu_read_unlock(); 48127359fd6SMatthew Wilcox return (dax_entry_t)entry; 482c2a7d2a1SDan Williams } 483c2a7d2a1SDan Williams 48427359fd6SMatthew Wilcox void dax_unlock_page(struct page *page, dax_entry_t cookie) 485c2a7d2a1SDan Williams { 486c2a7d2a1SDan Williams struct address_space *mapping = page->mapping; 4879f32d221SMatthew Wilcox XA_STATE(xas, &mapping->i_pages, page->index); 488c2a7d2a1SDan Williams 4899f32d221SMatthew Wilcox if (S_ISCHR(mapping->host->i_mode)) 490c2a7d2a1SDan Williams return; 491c2a7d2a1SDan Williams 49227359fd6SMatthew Wilcox dax_unlock_entry(&xas, (void *)cookie); 493c2a7d2a1SDan Williams } 494c2a7d2a1SDan Williams 495ac401cc7SJan Kara /* 4962f437effSShiyang Ruan * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping 4972f437effSShiyang Ruan * @mapping: the file's mapping whose entry we want to lock 4982f437effSShiyang Ruan * @index: the offset within this file 4992f437effSShiyang Ruan * @page: output the dax page corresponding to this dax entry 5002f437effSShiyang Ruan * 5012f437effSShiyang Ruan * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry 5022f437effSShiyang Ruan * could not be locked. 
5032f437effSShiyang Ruan */ 5042f437effSShiyang Ruan dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index, 5052f437effSShiyang Ruan struct page **page) 5062f437effSShiyang Ruan { 5072f437effSShiyang Ruan XA_STATE(xas, NULL, 0); 5082f437effSShiyang Ruan void *entry; 5092f437effSShiyang Ruan 5102f437effSShiyang Ruan rcu_read_lock(); 5112f437effSShiyang Ruan for (;;) { 5122f437effSShiyang Ruan entry = NULL; 5132f437effSShiyang Ruan if (!dax_mapping(mapping)) 5142f437effSShiyang Ruan break; 5152f437effSShiyang Ruan 5162f437effSShiyang Ruan xas.xa = &mapping->i_pages; 5172f437effSShiyang Ruan xas_lock_irq(&xas); 5182f437effSShiyang Ruan xas_set(&xas, index); 5192f437effSShiyang Ruan entry = xas_load(&xas); 5202f437effSShiyang Ruan if (dax_is_locked(entry)) { 5212f437effSShiyang Ruan rcu_read_unlock(); 5222f437effSShiyang Ruan wait_entry_unlocked(&xas, entry); 5232f437effSShiyang Ruan rcu_read_lock(); 5242f437effSShiyang Ruan continue; 5252f437effSShiyang Ruan } 5262f437effSShiyang Ruan if (!entry || 5272f437effSShiyang Ruan dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { 5282f437effSShiyang Ruan /* 5292f437effSShiyang Ruan * Because we are looking for entry from file's mapping 5302f437effSShiyang Ruan * and index, so the entry may not be inserted for now, 5312f437effSShiyang Ruan * or even a zero/empty entry. We don't think this is 5322f437effSShiyang Ruan * an error case. So, return a special value and do 5332f437effSShiyang Ruan * not output @page. 5342f437effSShiyang Ruan */ 5352f437effSShiyang Ruan entry = (void *)~0UL; 5362f437effSShiyang Ruan } else { 5372f437effSShiyang Ruan *page = pfn_to_page(dax_to_pfn(entry)); 5382f437effSShiyang Ruan dax_lock_entry(&xas, entry); 5392f437effSShiyang Ruan } 5402f437effSShiyang Ruan xas_unlock_irq(&xas); 5412f437effSShiyang Ruan break; 5422f437effSShiyang Ruan } 5432f437effSShiyang Ruan rcu_read_unlock(); 5442f437effSShiyang Ruan return (dax_entry_t)entry; 5452f437effSShiyang Ruan } 5462f437effSShiyang Ruan 5472f437effSShiyang Ruan void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index, 5482f437effSShiyang Ruan dax_entry_t cookie) 5492f437effSShiyang Ruan { 5502f437effSShiyang Ruan XA_STATE(xas, &mapping->i_pages, index); 5512f437effSShiyang Ruan 5522f437effSShiyang Ruan if (cookie == ~0UL) 5532f437effSShiyang Ruan return; 5542f437effSShiyang Ruan 5552f437effSShiyang Ruan dax_unlock_entry(&xas, (void *)cookie); 5562f437effSShiyang Ruan } 5572f437effSShiyang Ruan 5582f437effSShiyang Ruan /* 559a77d19f4SMatthew Wilcox * Find page cache entry at given index. If it is a DAX entry, return it 560a77d19f4SMatthew Wilcox * with the entry locked. If the page cache doesn't contain an entry at 561a77d19f4SMatthew Wilcox * that index, add a locked empty entry. 562ac401cc7SJan Kara * 5633159f943SMatthew Wilcox * When requesting an entry with size DAX_PMD, grab_mapping_entry() will 564b15cd800SMatthew Wilcox * either return that locked entry or will return VM_FAULT_FALLBACK. 565b15cd800SMatthew Wilcox * This will happen if there are any PTE entries within the PMD range 566b15cd800SMatthew Wilcox * that we are requesting. 567642261acSRoss Zwisler * 568b15cd800SMatthew Wilcox * We always favor PTE entries over PMD entries. There isn't a flow where we 569b15cd800SMatthew Wilcox * evict PTE entries in order to 'upgrade' them to a PMD entry. 
A PMD 570b15cd800SMatthew Wilcox * insertion will fail if it finds any PTE entries already in the tree, and a 571b15cd800SMatthew Wilcox * PTE insertion will cause an existing PMD entry to be unmapped and 572b15cd800SMatthew Wilcox * downgraded to PTE entries. This happens for both PMD zero pages as 573b15cd800SMatthew Wilcox * well as PMD empty entries. 574642261acSRoss Zwisler * 575b15cd800SMatthew Wilcox * The exception to this downgrade path is for PMD entries that have 576b15cd800SMatthew Wilcox * real storage backing them. We will leave these real PMD entries in 577b15cd800SMatthew Wilcox * the tree, and PTE writes will simply dirty the entire PMD entry. 578642261acSRoss Zwisler * 579ac401cc7SJan Kara * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For 580ac401cc7SJan Kara * persistent memory the benefit is doubtful. We can add that later if we can 581ac401cc7SJan Kara * show it helps. 582b15cd800SMatthew Wilcox * 583b15cd800SMatthew Wilcox * On error, this function does not return an ERR_PTR. Instead it returns 584b15cd800SMatthew Wilcox * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values 585b15cd800SMatthew Wilcox * overlap with xarray value entries. 586ac401cc7SJan Kara */ 587b15cd800SMatthew Wilcox static void *grab_mapping_entry(struct xa_state *xas, 58823c84eb7SMatthew Wilcox (Oracle) struct address_space *mapping, unsigned int order) 589ac401cc7SJan Kara { 590b15cd800SMatthew Wilcox unsigned long index = xas->xa_index; 5911a14e377SJan Kara bool pmd_downgrade; /* splitting PMD entry into PTE entries? */ 592b15cd800SMatthew Wilcox void *entry; 593ac401cc7SJan Kara 594b15cd800SMatthew Wilcox retry: 5951a14e377SJan Kara pmd_downgrade = false; 596b15cd800SMatthew Wilcox xas_lock_irq(xas); 59723c84eb7SMatthew Wilcox (Oracle) entry = get_unlocked_entry(xas, order); 598642261acSRoss Zwisler 599b15cd800SMatthew Wilcox if (entry) { 60023c84eb7SMatthew Wilcox (Oracle) if (dax_is_conflict(entry)) 60123c84eb7SMatthew Wilcox (Oracle) goto fallback; 6020e40de03SMatthew Wilcox if (!xa_is_value(entry)) { 60349688e65SHao Li xas_set_err(xas, -EIO); 60491d25ba8SRoss Zwisler goto out_unlock; 60591d25ba8SRoss Zwisler } 60691d25ba8SRoss Zwisler 60723c84eb7SMatthew Wilcox (Oracle) if (order == 0) { 60891d25ba8SRoss Zwisler if (dax_is_pmd_entry(entry) && 609642261acSRoss Zwisler (dax_is_zero_entry(entry) || 610642261acSRoss Zwisler dax_is_empty_entry(entry))) { 611642261acSRoss Zwisler pmd_downgrade = true; 612642261acSRoss Zwisler } 613642261acSRoss Zwisler } 614642261acSRoss Zwisler } 615642261acSRoss Zwisler 616642261acSRoss Zwisler if (pmd_downgrade) { 617642261acSRoss Zwisler /* 618642261acSRoss Zwisler * Make sure 'entry' remains valid while we drop 619b93b0163SMatthew Wilcox * the i_pages lock. 620642261acSRoss Zwisler */ 621b15cd800SMatthew Wilcox dax_lock_entry(xas, entry); 622642261acSRoss Zwisler 623642261acSRoss Zwisler /* 624642261acSRoss Zwisler * Besides huge zero pages the only other thing that gets 625642261acSRoss Zwisler * downgraded are empty entries which don't need to be 626642261acSRoss Zwisler * unmapped. 
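 * The zero page case must be unmapped from all user mappings before the
 * entry is removed, and unmap_mapping_pages() can only run after dropping
 * the xa_lock, which is why the entry was locked above.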
627642261acSRoss Zwisler */ 628b15cd800SMatthew Wilcox if (dax_is_zero_entry(entry)) { 629b15cd800SMatthew Wilcox xas_unlock_irq(xas); 630b15cd800SMatthew Wilcox unmap_mapping_pages(mapping, 631b15cd800SMatthew Wilcox xas->xa_index & ~PG_PMD_COLOUR, 632977fbdcdSMatthew Wilcox PG_PMD_NR, false); 633b15cd800SMatthew Wilcox xas_reset(xas); 634b15cd800SMatthew Wilcox xas_lock_irq(xas); 635e11f8b7bSRoss Zwisler } 636e11f8b7bSRoss Zwisler 637d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, false); 638b15cd800SMatthew Wilcox xas_store(xas, NULL); /* undo the PMD join */ 639698ab77aSVivek Goyal dax_wake_entry(xas, entry, WAKE_ALL); 6407f0e07fbSMatthew Wilcox (Oracle) mapping->nrpages -= PG_PMD_NR; 641b15cd800SMatthew Wilcox entry = NULL; 642b15cd800SMatthew Wilcox xas_set(xas, index); 643642261acSRoss Zwisler } 644642261acSRoss Zwisler 645b15cd800SMatthew Wilcox if (entry) { 646b15cd800SMatthew Wilcox dax_lock_entry(xas, entry); 647b15cd800SMatthew Wilcox } else { 64823c84eb7SMatthew Wilcox (Oracle) unsigned long flags = DAX_EMPTY; 64923c84eb7SMatthew Wilcox (Oracle) 65023c84eb7SMatthew Wilcox (Oracle) if (order > 0) 65123c84eb7SMatthew Wilcox (Oracle) flags |= DAX_PMD; 65223c84eb7SMatthew Wilcox (Oracle) entry = dax_make_entry(pfn_to_pfn_t(0), flags); 653b15cd800SMatthew Wilcox dax_lock_entry(xas, entry); 654b15cd800SMatthew Wilcox if (xas_error(xas)) 655b15cd800SMatthew Wilcox goto out_unlock; 6567f0e07fbSMatthew Wilcox (Oracle) mapping->nrpages += 1UL << order; 657ac401cc7SJan Kara } 658b15cd800SMatthew Wilcox 659642261acSRoss Zwisler out_unlock: 660b15cd800SMatthew Wilcox xas_unlock_irq(xas); 661b15cd800SMatthew Wilcox if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) 662b15cd800SMatthew Wilcox goto retry; 663b15cd800SMatthew Wilcox if (xas->xa_node == XA_ERROR(-ENOMEM)) 664b15cd800SMatthew Wilcox return xa_mk_internal(VM_FAULT_OOM); 665b15cd800SMatthew Wilcox if (xas_error(xas)) 666b15cd800SMatthew Wilcox return xa_mk_internal(VM_FAULT_SIGBUS); 667e3ad61c6SRoss Zwisler return entry; 668b15cd800SMatthew Wilcox fallback: 669b15cd800SMatthew Wilcox xas_unlock_irq(xas); 670b15cd800SMatthew Wilcox return xa_mk_internal(VM_FAULT_FALLBACK); 671ac401cc7SJan Kara } 672ac401cc7SJan Kara 6735fac7408SDan Williams /** 6746bbdd563SVivek Goyal * dax_layout_busy_page_range - find first pinned page in @mapping 6755fac7408SDan Williams * @mapping: address space to scan for a page with ref count > 1 6766bbdd563SVivek Goyal * @start: Starting offset. Page containing 'start' is included. 6776bbdd563SVivek Goyal * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX, 6786bbdd563SVivek Goyal * pages from 'start' till the end of file are included. 6795fac7408SDan Williams * 6805fac7408SDan Williams * DAX requires ZONE_DEVICE mapped pages. These pages are never 6815fac7408SDan Williams * 'onlined' to the page allocator so they are considered idle when 6825fac7408SDan Williams * page->count == 1. A filesystem uses this interface to determine if 6835fac7408SDan Williams * any page in the mapping is busy, i.e. for DMA, or other 6845fac7408SDan Williams * get_user_pages() usages. 6855fac7408SDan Williams * 6865fac7408SDan Williams * It is expected that the filesystem is holding locks to block the 6875fac7408SDan Williams * establishment of new mappings in this address_space. I.e. it expects 6885fac7408SDan Williams * to be able to run unmap_mapping_range() and subsequently not race 6895fac7408SDan Williams * mapping_mapped() becoming true. 
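 *
 * Return: The first page in the range with a reference count greater than
 * one, or NULL if no such page is found.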
6905fac7408SDan Williams */ 6916bbdd563SVivek Goyal struct page *dax_layout_busy_page_range(struct address_space *mapping, 6926bbdd563SVivek Goyal loff_t start, loff_t end) 6935fac7408SDan Williams { 694084a8990SMatthew Wilcox void *entry; 695084a8990SMatthew Wilcox unsigned int scanned = 0; 6965fac7408SDan Williams struct page *page = NULL; 6976bbdd563SVivek Goyal pgoff_t start_idx = start >> PAGE_SHIFT; 6986bbdd563SVivek Goyal pgoff_t end_idx; 6996bbdd563SVivek Goyal XA_STATE(xas, &mapping->i_pages, start_idx); 7005fac7408SDan Williams 7015fac7408SDan Williams /* 7025fac7408SDan Williams * In the 'limited' case get_user_pages() for dax is disabled. 7035fac7408SDan Williams */ 7045fac7408SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 7055fac7408SDan Williams return NULL; 7065fac7408SDan Williams 7075fac7408SDan Williams if (!dax_mapping(mapping) || !mapping_mapped(mapping)) 7085fac7408SDan Williams return NULL; 7095fac7408SDan Williams 7106bbdd563SVivek Goyal /* If end == LLONG_MAX, all pages from start to till end of file */ 7116bbdd563SVivek Goyal if (end == LLONG_MAX) 7126bbdd563SVivek Goyal end_idx = ULONG_MAX; 7136bbdd563SVivek Goyal else 7146bbdd563SVivek Goyal end_idx = end >> PAGE_SHIFT; 7155fac7408SDan Williams /* 7165fac7408SDan Williams * If we race get_user_pages_fast() here either we'll see the 717084a8990SMatthew Wilcox * elevated page count in the iteration and wait, or 7185fac7408SDan Williams * get_user_pages_fast() will see that the page it took a reference 7195fac7408SDan Williams * against is no longer mapped in the page tables and bail to the 7205fac7408SDan Williams * get_user_pages() slow path. The slow path is protected by 7215fac7408SDan Williams * pte_lock() and pmd_lock(). New references are not taken without 7226bbdd563SVivek Goyal * holding those locks, and unmap_mapping_pages() will not zero the 7235fac7408SDan Williams * pte or pmd without holding the respective lock, so we are 7245fac7408SDan Williams * guaranteed to either see new references or prevent new 7255fac7408SDan Williams * references from being established. 
7265fac7408SDan Williams */ 7276bbdd563SVivek Goyal unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0); 7285fac7408SDan Williams 729084a8990SMatthew Wilcox xas_lock_irq(&xas); 7306bbdd563SVivek Goyal xas_for_each(&xas, entry, end_idx) { 731084a8990SMatthew Wilcox if (WARN_ON_ONCE(!xa_is_value(entry))) 7325fac7408SDan Williams continue; 733084a8990SMatthew Wilcox if (unlikely(dax_is_locked(entry))) 73423c84eb7SMatthew Wilcox (Oracle) entry = get_unlocked_entry(&xas, 0); 7355fac7408SDan Williams if (entry) 7365fac7408SDan Williams page = dax_busy_page(entry); 7374c3d043dSVivek Goyal put_unlocked_entry(&xas, entry, WAKE_NEXT); 7385fac7408SDan Williams if (page) 7395fac7408SDan Williams break; 740084a8990SMatthew Wilcox if (++scanned % XA_CHECK_SCHED) 741084a8990SMatthew Wilcox continue; 742cdbf8897SRoss Zwisler 743084a8990SMatthew Wilcox xas_pause(&xas); 744084a8990SMatthew Wilcox xas_unlock_irq(&xas); 745084a8990SMatthew Wilcox cond_resched(); 746084a8990SMatthew Wilcox xas_lock_irq(&xas); 7475fac7408SDan Williams } 748084a8990SMatthew Wilcox xas_unlock_irq(&xas); 7495fac7408SDan Williams return page; 7505fac7408SDan Williams } 7516bbdd563SVivek Goyal EXPORT_SYMBOL_GPL(dax_layout_busy_page_range); 7526bbdd563SVivek Goyal 7536bbdd563SVivek Goyal struct page *dax_layout_busy_page(struct address_space *mapping) 7546bbdd563SVivek Goyal { 7556bbdd563SVivek Goyal return dax_layout_busy_page_range(mapping, 0, LLONG_MAX); 7566bbdd563SVivek Goyal } 7575fac7408SDan Williams EXPORT_SYMBOL_GPL(dax_layout_busy_page); 7585fac7408SDan Williams 759a77d19f4SMatthew Wilcox static int __dax_invalidate_entry(struct address_space *mapping, 760c6dcf52cSJan Kara pgoff_t index, bool trunc) 761c6dcf52cSJan Kara { 76207f2d89cSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, index); 763c6dcf52cSJan Kara int ret = 0; 764c6dcf52cSJan Kara void *entry; 765c6dcf52cSJan Kara 76607f2d89cSMatthew Wilcox xas_lock_irq(&xas); 76723c84eb7SMatthew Wilcox (Oracle) entry = get_unlocked_entry(&xas, 0); 7683159f943SMatthew Wilcox if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) 769c6dcf52cSJan Kara goto out; 770c6dcf52cSJan Kara if (!trunc && 77107f2d89cSMatthew Wilcox (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) || 77207f2d89cSMatthew Wilcox xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE))) 773c6dcf52cSJan Kara goto out; 774d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, trunc); 77507f2d89cSMatthew Wilcox xas_store(&xas, NULL); 7767f0e07fbSMatthew Wilcox (Oracle) mapping->nrpages -= 1UL << dax_entry_order(entry); 777c6dcf52cSJan Kara ret = 1; 778c6dcf52cSJan Kara out: 77923738832SVivek Goyal put_unlocked_entry(&xas, entry, WAKE_ALL); 78007f2d89cSMatthew Wilcox xas_unlock_irq(&xas); 781c6dcf52cSJan Kara return ret; 782c6dcf52cSJan Kara } 78307f2d89cSMatthew Wilcox 784*f76b3a32SShiyang Ruan static int __dax_clear_dirty_range(struct address_space *mapping, 785*f76b3a32SShiyang Ruan pgoff_t start, pgoff_t end) 786*f76b3a32SShiyang Ruan { 787*f76b3a32SShiyang Ruan XA_STATE(xas, &mapping->i_pages, start); 788*f76b3a32SShiyang Ruan unsigned int scanned = 0; 789*f76b3a32SShiyang Ruan void *entry; 790*f76b3a32SShiyang Ruan 791*f76b3a32SShiyang Ruan xas_lock_irq(&xas); 792*f76b3a32SShiyang Ruan xas_for_each(&xas, entry, end) { 793*f76b3a32SShiyang Ruan entry = get_unlocked_entry(&xas, 0); 794*f76b3a32SShiyang Ruan xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); 795*f76b3a32SShiyang Ruan xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); 796*f76b3a32SShiyang Ruan put_unlocked_entry(&xas, entry, WAKE_NEXT); 797*f76b3a32SShiyang Ruan 
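		/*
		 * Periodically drop the lock and reschedule so that clearing
		 * a large range does not keep the xa_lock held (with
		 * interrupts disabled) for too long.
		 */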
798*f76b3a32SShiyang Ruan if (++scanned % XA_CHECK_SCHED) 799*f76b3a32SShiyang Ruan continue; 800*f76b3a32SShiyang Ruan 801*f76b3a32SShiyang Ruan xas_pause(&xas); 802*f76b3a32SShiyang Ruan xas_unlock_irq(&xas); 803*f76b3a32SShiyang Ruan cond_resched(); 804*f76b3a32SShiyang Ruan xas_lock_irq(&xas); 805*f76b3a32SShiyang Ruan } 806*f76b3a32SShiyang Ruan xas_unlock_irq(&xas); 807*f76b3a32SShiyang Ruan 808*f76b3a32SShiyang Ruan return 0; 809*f76b3a32SShiyang Ruan } 810*f76b3a32SShiyang Ruan 811ac401cc7SJan Kara /* 8123159f943SMatthew Wilcox * Delete DAX entry at @index from @mapping. Wait for it 8133159f943SMatthew Wilcox * to be unlocked before deleting it. 814ac401cc7SJan Kara */ 815ac401cc7SJan Kara int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) 816ac401cc7SJan Kara { 817a77d19f4SMatthew Wilcox int ret = __dax_invalidate_entry(mapping, index, true); 818ac401cc7SJan Kara 819ac401cc7SJan Kara /* 820ac401cc7SJan Kara * This gets called from truncate / punch_hole path. As such, the caller 821ac401cc7SJan Kara * must hold locks protecting against concurrent modifications of the 822a77d19f4SMatthew Wilcox * page cache (usually fs-private i_mmap_sem for writing). Since the 8233159f943SMatthew Wilcox * caller has seen a DAX entry for this index, we better find it 824ac401cc7SJan Kara * at that index as well... 825ac401cc7SJan Kara */ 826c6dcf52cSJan Kara WARN_ON_ONCE(!ret); 827c6dcf52cSJan Kara return ret; 828ac401cc7SJan Kara } 829ac401cc7SJan Kara 830c6dcf52cSJan Kara /* 8313159f943SMatthew Wilcox * Invalidate DAX entry if it is clean. 832c6dcf52cSJan Kara */ 833c6dcf52cSJan Kara int dax_invalidate_mapping_entry_sync(struct address_space *mapping, 834c6dcf52cSJan Kara pgoff_t index) 835c6dcf52cSJan Kara { 836a77d19f4SMatthew Wilcox return __dax_invalidate_entry(mapping, index, false); 837ac401cc7SJan Kara } 838ac401cc7SJan Kara 83960696eb2SChristoph Hellwig static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos) 840f7ca90b1SMatthew Wilcox { 841de205114SChristoph Hellwig return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset); 842429f8de7SChristoph Hellwig } 843429f8de7SChristoph Hellwig 844429f8de7SChristoph Hellwig static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter) 845429f8de7SChristoph Hellwig { 84660696eb2SChristoph Hellwig pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos); 847cccbce67SDan Williams void *vto, *kaddr; 848cccbce67SDan Williams long rc; 849cccbce67SDan Williams int id; 850e2e05394SRoss Zwisler 851cccbce67SDan Williams id = dax_read_lock(); 852e511c4a3SJane Chu rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS, 853e511c4a3SJane Chu &kaddr, NULL); 854cccbce67SDan Williams if (rc < 0) { 855cccbce67SDan Williams dax_read_unlock(id); 856cccbce67SDan Williams return rc; 857cccbce67SDan Williams } 858429f8de7SChristoph Hellwig vto = kmap_atomic(vmf->cow_page); 859429f8de7SChristoph Hellwig copy_user_page(vto, kaddr, vmf->address, vmf->cow_page); 860f7ca90b1SMatthew Wilcox kunmap_atomic(vto); 861cccbce67SDan Williams dax_read_unlock(id); 862f7ca90b1SMatthew Wilcox return 0; 863f7ca90b1SMatthew Wilcox } 864f7ca90b1SMatthew Wilcox 865642261acSRoss Zwisler /* 866e5d6df73SShiyang Ruan * MAP_SYNC on a dax mapping guarantees dirty metadata is 867e5d6df73SShiyang Ruan * flushed on write-faults (non-cow), but not read-faults. 
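 * A fault is synchronous only when it is a write fault on a VM_SYNC vma
 * and the filesystem marked the mapping IOMAP_F_DIRTY, i.e. there is dirty
 * metadata that must be flushed before the mapping is installed.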
868e5d6df73SShiyang Ruan */ 869e5d6df73SShiyang Ruan static bool dax_fault_is_synchronous(const struct iomap_iter *iter, 870e5d6df73SShiyang Ruan struct vm_area_struct *vma) 871e5d6df73SShiyang Ruan { 872e5d6df73SShiyang Ruan return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) && 873e5d6df73SShiyang Ruan (iter->iomap.flags & IOMAP_F_DIRTY); 874e5d6df73SShiyang Ruan } 875e5d6df73SShiyang Ruan 876e5d6df73SShiyang Ruan /* 877642261acSRoss Zwisler * By this point grab_mapping_entry() has ensured that we have a locked entry 878642261acSRoss Zwisler * of the appropriate size so we don't have to worry about downgrading PMDs to 879642261acSRoss Zwisler * PTEs. If we happen to be trying to insert a PTE and there is a PMD 880642261acSRoss Zwisler * already in the tree, we will skip the insertion and just dirty the PMD as 881642261acSRoss Zwisler * appropriate. 882642261acSRoss Zwisler */ 883e5d6df73SShiyang Ruan static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, 884e5d6df73SShiyang Ruan const struct iomap_iter *iter, void *entry, pfn_t pfn, 885e5d6df73SShiyang Ruan unsigned long flags) 8869973c98eSRoss Zwisler { 887e5d6df73SShiyang Ruan struct address_space *mapping = vmf->vma->vm_file->f_mapping; 888b15cd800SMatthew Wilcox void *new_entry = dax_make_entry(pfn, flags); 889c6f0b395SShiyang Ruan bool write = iter->flags & IOMAP_WRITE; 890c6f0b395SShiyang Ruan bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma); 891c6f0b395SShiyang Ruan bool shared = iter->iomap.flags & IOMAP_F_SHARED; 8929973c98eSRoss Zwisler 893f5b7b748SJan Kara if (dirty) 8949973c98eSRoss Zwisler __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 8959973c98eSRoss Zwisler 896c6f0b395SShiyang Ruan if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) { 897b15cd800SMatthew Wilcox unsigned long index = xas->xa_index; 89891d25ba8SRoss Zwisler /* we are replacing a zero page with block mapping */ 89991d25ba8SRoss Zwisler if (dax_is_pmd_entry(entry)) 900977fbdcdSMatthew Wilcox unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, 901977fbdcdSMatthew Wilcox PG_PMD_NR, false); 90291d25ba8SRoss Zwisler else /* pte entry */ 903b15cd800SMatthew Wilcox unmap_mapping_pages(mapping, index, 1, false); 904ac401cc7SJan Kara } 9059973c98eSRoss Zwisler 906b15cd800SMatthew Wilcox xas_reset(xas); 907b15cd800SMatthew Wilcox xas_lock_irq(xas); 908c6f0b395SShiyang Ruan if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { 9091571c029SJan Kara void *old; 9101571c029SJan Kara 911d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, false); 9126061b69bSShiyang Ruan dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, 913c6f0b395SShiyang Ruan shared); 914642261acSRoss Zwisler /* 915a77d19f4SMatthew Wilcox * Only swap our new entry into the page cache if the current 916642261acSRoss Zwisler * entry is a zero page or an empty entry. If a normal PTE or 917a77d19f4SMatthew Wilcox * PMD entry is already in the cache, we leave it alone. This 918642261acSRoss Zwisler * means that if we are trying to insert a PTE and the 919642261acSRoss Zwisler * existing entry is a PMD, we will just leave the PMD in the 920642261acSRoss Zwisler * tree and dirty it if necessary. 
921642261acSRoss Zwisler */ 9221571c029SJan Kara old = dax_lock_entry(xas, new_entry); 923b15cd800SMatthew Wilcox WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | 924b15cd800SMatthew Wilcox DAX_LOCKED)); 92591d25ba8SRoss Zwisler entry = new_entry; 926b15cd800SMatthew Wilcox } else { 927b15cd800SMatthew Wilcox xas_load(xas); /* Walk the xa_state */ 928ac401cc7SJan Kara } 92991d25ba8SRoss Zwisler 930f5b7b748SJan Kara if (dirty) 931b15cd800SMatthew Wilcox xas_set_mark(xas, PAGECACHE_TAG_DIRTY); 93291d25ba8SRoss Zwisler 933c6f0b395SShiyang Ruan if (write && shared) 934e5d6df73SShiyang Ruan xas_set_mark(xas, PAGECACHE_TAG_TOWRITE); 935e5d6df73SShiyang Ruan 936b15cd800SMatthew Wilcox xas_unlock_irq(xas); 93791d25ba8SRoss Zwisler return entry; 9389973c98eSRoss Zwisler } 9399973c98eSRoss Zwisler 9409fc747f6SMatthew Wilcox static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, 9419fc747f6SMatthew Wilcox struct address_space *mapping, void *entry) 9429973c98eSRoss Zwisler { 94306083a09SMuchun Song unsigned long pfn, index, count, end; 9443fe0791cSDan Williams long ret = 0; 94506083a09SMuchun Song struct vm_area_struct *vma; 9469973c98eSRoss Zwisler 9479973c98eSRoss Zwisler /* 948a6abc2c0SJan Kara * A page got tagged dirty in DAX mapping? Something is seriously 949a6abc2c0SJan Kara * wrong. 9509973c98eSRoss Zwisler */ 9513159f943SMatthew Wilcox if (WARN_ON(!xa_is_value(entry))) 952a6abc2c0SJan Kara return -EIO; 9539973c98eSRoss Zwisler 9549fc747f6SMatthew Wilcox if (unlikely(dax_is_locked(entry))) { 9559fc747f6SMatthew Wilcox void *old_entry = entry; 9569fc747f6SMatthew Wilcox 95723c84eb7SMatthew Wilcox (Oracle) entry = get_unlocked_entry(xas, 0); 9589fc747f6SMatthew Wilcox 959a6abc2c0SJan Kara /* Entry got punched out / reallocated? */ 9609fc747f6SMatthew Wilcox if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) 961a6abc2c0SJan Kara goto put_unlocked; 962a6abc2c0SJan Kara /* 9639fc747f6SMatthew Wilcox * Entry got reallocated elsewhere? No need to writeback. 9649fc747f6SMatthew Wilcox * We have to compare pfns as we must not bail out due to 9659fc747f6SMatthew Wilcox * difference in lockbit or entry type. 966a6abc2c0SJan Kara */ 9679fc747f6SMatthew Wilcox if (dax_to_pfn(old_entry) != dax_to_pfn(entry)) 968a6abc2c0SJan Kara goto put_unlocked; 969642261acSRoss Zwisler if (WARN_ON_ONCE(dax_is_empty_entry(entry) || 970642261acSRoss Zwisler dax_is_zero_entry(entry))) { 9719973c98eSRoss Zwisler ret = -EIO; 972a6abc2c0SJan Kara goto put_unlocked; 9739973c98eSRoss Zwisler } 9749973c98eSRoss Zwisler 9759fc747f6SMatthew Wilcox /* Another fsync thread may have already done this entry */ 9769fc747f6SMatthew Wilcox if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) 977a6abc2c0SJan Kara goto put_unlocked; 9789fc747f6SMatthew Wilcox } 9799fc747f6SMatthew Wilcox 980a6abc2c0SJan Kara /* Lock the entry to serialize with page faults */ 9819fc747f6SMatthew Wilcox dax_lock_entry(xas, entry); 9829fc747f6SMatthew Wilcox 983a6abc2c0SJan Kara /* 984a6abc2c0SJan Kara * We can clear the tag now but we have to be careful so that concurrent 985a6abc2c0SJan Kara * dax_writeback_one() calls for the same index cannot finish before we 986a6abc2c0SJan Kara * actually flush the caches. This is achieved as the calls will look 987b93b0163SMatthew Wilcox * at the entry only under the i_pages lock and once they do that 988b93b0163SMatthew Wilcox * they will see the entry locked and wait for it to unlock. 
989a6abc2c0SJan Kara */ 9909fc747f6SMatthew Wilcox xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE); 9919fc747f6SMatthew Wilcox xas_unlock_irq(xas); 992a6abc2c0SJan Kara 993642261acSRoss Zwisler /* 994e4b3448bSMatthew Wilcox * If dax_writeback_mapping_range() was given a wbc->range_start 995e4b3448bSMatthew Wilcox * in the middle of a PMD, the 'index' we use needs to be 996e4b3448bSMatthew Wilcox * aligned to the start of the PMD. 9973fe0791cSDan Williams * This allows us to flush for PMD_SIZE and not have to worry about 9983fe0791cSDan Williams * partial PMD writebacks. 999642261acSRoss Zwisler */ 1000a77d19f4SMatthew Wilcox pfn = dax_to_pfn(entry); 1001e4b3448bSMatthew Wilcox count = 1UL << dax_entry_order(entry); 1002e4b3448bSMatthew Wilcox index = xas->xa_index & ~(count - 1); 100306083a09SMuchun Song end = index + count - 1; 1004cccbce67SDan Williams 100506083a09SMuchun Song /* Walk all mappings of a given index of a file and writeprotect them */ 100606083a09SMuchun Song i_mmap_lock_read(mapping); 100706083a09SMuchun Song vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) { 100806083a09SMuchun Song pfn_mkclean_range(pfn, count, index, vma); 100906083a09SMuchun Song cond_resched(); 101006083a09SMuchun Song } 101106083a09SMuchun Song i_mmap_unlock_read(mapping); 101206083a09SMuchun Song 1013e4b3448bSMatthew Wilcox dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE); 10144b4bb46dSJan Kara /* 10154b4bb46dSJan Kara * After we have flushed the cache, we can clear the dirty tag. There 10164b4bb46dSJan Kara * cannot be new dirty data in the pfn after the flush has completed as 10174b4bb46dSJan Kara * the pfn mappings are writeprotected and fault waits for mapping 10184b4bb46dSJan Kara * entry lock. 10194b4bb46dSJan Kara */ 10209fc747f6SMatthew Wilcox xas_reset(xas); 10219fc747f6SMatthew Wilcox xas_lock_irq(xas); 10229fc747f6SMatthew Wilcox xas_store(xas, entry); 10239fc747f6SMatthew Wilcox xas_clear_mark(xas, PAGECACHE_TAG_DIRTY); 1024698ab77aSVivek Goyal dax_wake_entry(xas, entry, WAKE_NEXT); 10259fc747f6SMatthew Wilcox 1026e4b3448bSMatthew Wilcox trace_dax_writeback_one(mapping->host, index, count); 10279973c98eSRoss Zwisler return ret; 10289973c98eSRoss Zwisler 1029a6abc2c0SJan Kara put_unlocked: 10304c3d043dSVivek Goyal put_unlocked_entry(xas, entry, WAKE_NEXT); 10319973c98eSRoss Zwisler return ret; 10329973c98eSRoss Zwisler } 10339973c98eSRoss Zwisler 10349973c98eSRoss Zwisler /* 10359973c98eSRoss Zwisler * Flush the mapping to the persistent domain within the byte range of [start, 10369973c98eSRoss Zwisler * end]. This is required by data integrity operations to ensure file data is 10379973c98eSRoss Zwisler * on persistent storage prior to completion of the operation. 
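 *
 * Entries that need flushing are tagged PAGECACHE_TAG_TOWRITE up front via
 * tag_pages_for_writeback() and then flushed one at a time by
 * dax_writeback_one().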
10389973c98eSRoss Zwisler */ 10397f6d5b52SRoss Zwisler int dax_writeback_mapping_range(struct address_space *mapping, 10403f666c56SVivek Goyal struct dax_device *dax_dev, struct writeback_control *wbc) 10419973c98eSRoss Zwisler { 10429fc747f6SMatthew Wilcox XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); 10439973c98eSRoss Zwisler struct inode *inode = mapping->host; 10449fc747f6SMatthew Wilcox pgoff_t end_index = wbc->range_end >> PAGE_SHIFT; 10459fc747f6SMatthew Wilcox void *entry; 10469fc747f6SMatthew Wilcox int ret = 0; 10479fc747f6SMatthew Wilcox unsigned int scanned = 0; 10489973c98eSRoss Zwisler 10499973c98eSRoss Zwisler if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) 10509973c98eSRoss Zwisler return -EIO; 10519973c98eSRoss Zwisler 10527716506aSMatthew Wilcox (Oracle) if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL) 10537f6d5b52SRoss Zwisler return 0; 10547f6d5b52SRoss Zwisler 10559fc747f6SMatthew Wilcox trace_dax_writeback_range(inode, xas.xa_index, end_index); 10569973c98eSRoss Zwisler 10579fc747f6SMatthew Wilcox tag_pages_for_writeback(mapping, xas.xa_index, end_index); 1058d14a3f48SRoss Zwisler 10599fc747f6SMatthew Wilcox xas_lock_irq(&xas); 10609fc747f6SMatthew Wilcox xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { 10619fc747f6SMatthew Wilcox ret = dax_writeback_one(&xas, dax_dev, mapping, entry); 1062819ec6b9SJeff Layton if (ret < 0) { 1063819ec6b9SJeff Layton mapping_set_error(mapping, ret); 10649fc747f6SMatthew Wilcox break; 1065d14a3f48SRoss Zwisler } 10669fc747f6SMatthew Wilcox if (++scanned % XA_CHECK_SCHED) 10679fc747f6SMatthew Wilcox continue; 10689fc747f6SMatthew Wilcox 10699fc747f6SMatthew Wilcox xas_pause(&xas); 10709fc747f6SMatthew Wilcox xas_unlock_irq(&xas); 10719fc747f6SMatthew Wilcox cond_resched(); 10729fc747f6SMatthew Wilcox xas_lock_irq(&xas); 1073d14a3f48SRoss Zwisler } 10749fc747f6SMatthew Wilcox xas_unlock_irq(&xas); 10759fc747f6SMatthew Wilcox trace_dax_writeback_range_done(inode, xas.xa_index, end_index); 10769fc747f6SMatthew Wilcox return ret; 10779973c98eSRoss Zwisler } 10789973c98eSRoss Zwisler EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); 10799973c98eSRoss Zwisler 1080e28cd3e5SShiyang Ruan static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, 1081e28cd3e5SShiyang Ruan size_t size, void **kaddr, pfn_t *pfnp) 10825e161e40SJan Kara { 108360696eb2SChristoph Hellwig pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1084e28cd3e5SShiyang Ruan int id, rc = 0; 10855e161e40SJan Kara long length; 10865e161e40SJan Kara 1087cccbce67SDan Williams id = dax_read_lock(); 10885e161e40SJan Kara length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), 1089e28cd3e5SShiyang Ruan DAX_ACCESS, kaddr, pfnp); 10905e161e40SJan Kara if (length < 0) { 10915e161e40SJan Kara rc = length; 10925e161e40SJan Kara goto out; 10935e161e40SJan Kara } 1094e28cd3e5SShiyang Ruan if (!pfnp) 1095e28cd3e5SShiyang Ruan goto out_check_addr; 10965e161e40SJan Kara rc = -EINVAL; 10975e161e40SJan Kara if (PFN_PHYS(length) < size) 10985e161e40SJan Kara goto out; 10995e161e40SJan Kara if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1)) 11005e161e40SJan Kara goto out; 11015e161e40SJan Kara /* For larger pages we need devmap */ 11025e161e40SJan Kara if (length > 1 && !pfn_t_devmap(*pfnp)) 11035e161e40SJan Kara goto out; 11045e161e40SJan Kara rc = 0; 1105e28cd3e5SShiyang Ruan 1106e28cd3e5SShiyang Ruan out_check_addr: 1107e28cd3e5SShiyang Ruan if (!kaddr) 1108e28cd3e5SShiyang Ruan goto out; 1109e28cd3e5SShiyang Ruan if (!*kaddr) 
1110e28cd3e5SShiyang Ruan rc = -EFAULT; 11115e161e40SJan Kara out: 1112cccbce67SDan Williams dax_read_unlock(id); 1113cccbce67SDan Williams return rc; 1114cccbce67SDan Williams } 1115f7ca90b1SMatthew Wilcox 1116ff17b8dfSShiyang Ruan /** 1117708dfad2SShiyang Ruan * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page 1118708dfad2SShiyang Ruan * by copying the data before and after the range to be written. 1119ff17b8dfSShiyang Ruan * @pos: address to do copy from. 1120ff17b8dfSShiyang Ruan * @length: size of copy operation. 1121ff17b8dfSShiyang Ruan * @align_size: aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE) 1122ff17b8dfSShiyang Ruan * @srcmap: iomap srcmap 1123ff17b8dfSShiyang Ruan * @daddr: destination address to copy to. 1124ff17b8dfSShiyang Ruan * 1125ff17b8dfSShiyang Ruan * This can be called from two places. Either during DAX write fault (page 1126ff17b8dfSShiyang Ruan * aligned), to copy the length size data to daddr. Or, while doing normal DAX 1127708dfad2SShiyang Ruan * write operation, dax_iomap_iter() might call this to do the copy of either 1128ff17b8dfSShiyang Ruan * start or end unaligned address. In the latter case the rest of the copy of 1129708dfad2SShiyang Ruan * aligned ranges is taken care by dax_iomap_iter() itself. 1130708dfad2SShiyang Ruan * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the 1131708dfad2SShiyang Ruan * area to make sure no old data remains. 1132ff17b8dfSShiyang Ruan */ 1133708dfad2SShiyang Ruan static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size, 1134ff17b8dfSShiyang Ruan const struct iomap *srcmap, void *daddr) 1135ff17b8dfSShiyang Ruan { 1136ff17b8dfSShiyang Ruan loff_t head_off = pos & (align_size - 1); 1137ff17b8dfSShiyang Ruan size_t size = ALIGN(head_off + length, align_size); 1138ff17b8dfSShiyang Ruan loff_t end = pos + length; 1139ff17b8dfSShiyang Ruan loff_t pg_end = round_up(end, align_size); 1140708dfad2SShiyang Ruan /* copy_all is usually in page fault case */ 1141ff17b8dfSShiyang Ruan bool copy_all = head_off == 0 && end == pg_end; 1142708dfad2SShiyang Ruan /* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */ 1143708dfad2SShiyang Ruan bool zero_edge = srcmap->flags & IOMAP_F_SHARED || 1144708dfad2SShiyang Ruan srcmap->type == IOMAP_UNWRITTEN; 1145ff17b8dfSShiyang Ruan void *saddr = 0; 1146ff17b8dfSShiyang Ruan int ret = 0; 1147ff17b8dfSShiyang Ruan 1148708dfad2SShiyang Ruan if (!zero_edge) { 1149ff17b8dfSShiyang Ruan ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL); 1150ff17b8dfSShiyang Ruan if (ret) 1151ff17b8dfSShiyang Ruan return ret; 1152708dfad2SShiyang Ruan } 1153ff17b8dfSShiyang Ruan 1154ff17b8dfSShiyang Ruan if (copy_all) { 1155708dfad2SShiyang Ruan if (zero_edge) 1156708dfad2SShiyang Ruan memset(daddr, 0, size); 1157708dfad2SShiyang Ruan else 1158ff17b8dfSShiyang Ruan ret = copy_mc_to_kernel(daddr, saddr, length); 1159708dfad2SShiyang Ruan goto out; 1160ff17b8dfSShiyang Ruan } 1161ff17b8dfSShiyang Ruan 1162ff17b8dfSShiyang Ruan /* Copy the head part of the range */ 1163ff17b8dfSShiyang Ruan if (head_off) { 1164708dfad2SShiyang Ruan if (zero_edge) 1165708dfad2SShiyang Ruan memset(daddr, 0, head_off); 1166708dfad2SShiyang Ruan else { 1167ff17b8dfSShiyang Ruan ret = copy_mc_to_kernel(daddr, saddr, head_off); 1168ff17b8dfSShiyang Ruan if (ret) 1169ff17b8dfSShiyang Ruan return -EIO; 1170ff17b8dfSShiyang Ruan } 1171708dfad2SShiyang Ruan } 1172ff17b8dfSShiyang Ruan 1173ff17b8dfSShiyang Ruan /* Copy the tail part of the range */ 
1174ff17b8dfSShiyang Ruan if (end < pg_end) { 1175ff17b8dfSShiyang Ruan loff_t tail_off = head_off + length; 1176ff17b8dfSShiyang Ruan loff_t tail_len = pg_end - end; 1177ff17b8dfSShiyang Ruan 1178708dfad2SShiyang Ruan if (zero_edge) 1179708dfad2SShiyang Ruan memset(daddr + tail_off, 0, tail_len); 1180708dfad2SShiyang Ruan else { 1181708dfad2SShiyang Ruan ret = copy_mc_to_kernel(daddr + tail_off, 1182708dfad2SShiyang Ruan saddr + tail_off, tail_len); 1183ff17b8dfSShiyang Ruan if (ret) 1184ff17b8dfSShiyang Ruan return -EIO; 1185ff17b8dfSShiyang Ruan } 1186708dfad2SShiyang Ruan } 1187708dfad2SShiyang Ruan out: 1188708dfad2SShiyang Ruan if (zero_edge) 1189708dfad2SShiyang Ruan dax_flush(srcmap->dax_dev, daddr, size); 1190708dfad2SShiyang Ruan return ret ? -EIO : 0; 1191ff17b8dfSShiyang Ruan } 1192ff17b8dfSShiyang Ruan 11932f89dc12SJan Kara /* 119491d25ba8SRoss Zwisler * The user has performed a load from a hole in the file. Allocating a new 119591d25ba8SRoss Zwisler * page in the file would cause excessive storage usage for workloads with 119691d25ba8SRoss Zwisler * sparse files. Instead we insert a read-only mapping of the 4k zero page. 119791d25ba8SRoss Zwisler * If this page is ever written to we will re-fault and change the mapping to 119891d25ba8SRoss Zwisler * point to real DAX storage instead. 11992f89dc12SJan Kara */ 1200e5d6df73SShiyang Ruan static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, 1201e5d6df73SShiyang Ruan const struct iomap_iter *iter, void **entry) 1202e30331ffSRoss Zwisler { 1203e5d6df73SShiyang Ruan struct inode *inode = iter->inode; 120491d25ba8SRoss Zwisler unsigned long vaddr = vmf->address; 1205b90ca5ccSMatthew Wilcox pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); 1206b90ca5ccSMatthew Wilcox vm_fault_t ret; 1207e30331ffSRoss Zwisler 1208e5d6df73SShiyang Ruan *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); 12093159f943SMatthew Wilcox 1210ab77dab4SSouptick Joarder ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); 1211e30331ffSRoss Zwisler trace_dax_load_hole(inode, vmf, ret); 1212e30331ffSRoss Zwisler return ret; 1213e30331ffSRoss Zwisler } 1214e30331ffSRoss Zwisler 1215c2436190SShiyang Ruan #ifdef CONFIG_FS_DAX_PMD 1216c2436190SShiyang Ruan static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, 1217e5d6df73SShiyang Ruan const struct iomap_iter *iter, void **entry) 1218c2436190SShiyang Ruan { 1219c2436190SShiyang Ruan struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1220c2436190SShiyang Ruan unsigned long pmd_addr = vmf->address & PMD_MASK; 1221c2436190SShiyang Ruan struct vm_area_struct *vma = vmf->vma; 1222c2436190SShiyang Ruan struct inode *inode = mapping->host; 1223c2436190SShiyang Ruan pgtable_t pgtable = NULL; 1224c2436190SShiyang Ruan struct page *zero_page; 1225c2436190SShiyang Ruan spinlock_t *ptl; 1226c2436190SShiyang Ruan pmd_t pmd_entry; 1227c2436190SShiyang Ruan pfn_t pfn; 1228c2436190SShiyang Ruan 1229c2436190SShiyang Ruan zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); 1230c2436190SShiyang Ruan 1231c2436190SShiyang Ruan if (unlikely(!zero_page)) 1232c2436190SShiyang Ruan goto fallback; 1233c2436190SShiyang Ruan 1234c2436190SShiyang Ruan pfn = page_to_pfn_t(zero_page); 1235e5d6df73SShiyang Ruan *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, 1236e5d6df73SShiyang Ruan DAX_PMD | DAX_ZERO_PAGE); 1237c2436190SShiyang Ruan 1238c2436190SShiyang Ruan if (arch_needs_pgtable_deposit()) { 1239c2436190SShiyang Ruan pgtable = pte_alloc_one(vma->vm_mm); 1240c2436190SShiyang 
Ruan if (!pgtable) 1241c2436190SShiyang Ruan return VM_FAULT_OOM; 1242c2436190SShiyang Ruan } 1243c2436190SShiyang Ruan 1244c2436190SShiyang Ruan ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1245c2436190SShiyang Ruan if (!pmd_none(*(vmf->pmd))) { 1246c2436190SShiyang Ruan spin_unlock(ptl); 1247c2436190SShiyang Ruan goto fallback; 1248c2436190SShiyang Ruan } 1249c2436190SShiyang Ruan 1250c2436190SShiyang Ruan if (pgtable) { 1251c2436190SShiyang Ruan pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); 1252c2436190SShiyang Ruan mm_inc_nr_ptes(vma->vm_mm); 1253c2436190SShiyang Ruan } 1254c2436190SShiyang Ruan pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); 1255c2436190SShiyang Ruan pmd_entry = pmd_mkhuge(pmd_entry); 1256c2436190SShiyang Ruan set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); 1257c2436190SShiyang Ruan spin_unlock(ptl); 1258c2436190SShiyang Ruan trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); 1259c2436190SShiyang Ruan return VM_FAULT_NOPAGE; 1260c2436190SShiyang Ruan 1261c2436190SShiyang Ruan fallback: 1262c2436190SShiyang Ruan if (pgtable) 1263c2436190SShiyang Ruan pte_free(vma->vm_mm, pgtable); 1264c2436190SShiyang Ruan trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); 1265c2436190SShiyang Ruan return VM_FAULT_FALLBACK; 1266c2436190SShiyang Ruan } 1267c2436190SShiyang Ruan #else 1268c2436190SShiyang Ruan static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, 1269e5d6df73SShiyang Ruan const struct iomap_iter *iter, void **entry) 1270c2436190SShiyang Ruan { 1271c2436190SShiyang Ruan return VM_FAULT_FALLBACK; 1272c2436190SShiyang Ruan } 1273c2436190SShiyang Ruan #endif /* CONFIG_FS_DAX_PMD */ 1274c2436190SShiyang Ruan 1275d984648eSShiyang Ruan static s64 dax_unshare_iter(struct iomap_iter *iter) 1276d984648eSShiyang Ruan { 1277d984648eSShiyang Ruan struct iomap *iomap = &iter->iomap; 1278d984648eSShiyang Ruan const struct iomap *srcmap = iomap_iter_srcmap(iter); 1279d984648eSShiyang Ruan loff_t pos = iter->pos; 1280d984648eSShiyang Ruan loff_t length = iomap_length(iter); 1281d984648eSShiyang Ruan int id = 0; 1282d984648eSShiyang Ruan s64 ret = 0; 1283d984648eSShiyang Ruan void *daddr = NULL, *saddr = NULL; 1284d984648eSShiyang Ruan 1285d984648eSShiyang Ruan /* don't bother with blocks that are not shared to start with */ 1286d984648eSShiyang Ruan if (!(iomap->flags & IOMAP_F_SHARED)) 1287d984648eSShiyang Ruan return length; 1288d984648eSShiyang Ruan 1289d984648eSShiyang Ruan id = dax_read_lock(); 1290d984648eSShiyang Ruan ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL); 1291d984648eSShiyang Ruan if (ret < 0) 1292d984648eSShiyang Ruan goto out_unlock; 1293d984648eSShiyang Ruan 129413dd4e04SShiyang Ruan /* zero the distance if srcmap is HOLE or UNWRITTEN */ 129513dd4e04SShiyang Ruan if (srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN) { 129613dd4e04SShiyang Ruan memset(daddr, 0, length); 129713dd4e04SShiyang Ruan dax_flush(iomap->dax_dev, daddr, length); 129813dd4e04SShiyang Ruan ret = length; 129913dd4e04SShiyang Ruan goto out_unlock; 130013dd4e04SShiyang Ruan } 130113dd4e04SShiyang Ruan 1302d984648eSShiyang Ruan ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL); 1303d984648eSShiyang Ruan if (ret < 0) 1304d984648eSShiyang Ruan goto out_unlock; 1305d984648eSShiyang Ruan 1306388bc034SShiyang Ruan if (copy_mc_to_kernel(daddr, saddr, length) == 0) 1307388bc034SShiyang Ruan ret = length; 1308388bc034SShiyang Ruan else 1309d984648eSShiyang Ruan ret = -EIO; 
1310d984648eSShiyang Ruan 1311d984648eSShiyang Ruan out_unlock: 1312d984648eSShiyang Ruan dax_read_unlock(id); 1313d984648eSShiyang Ruan return ret; 1314d984648eSShiyang Ruan } 1315d984648eSShiyang Ruan 1316d984648eSShiyang Ruan int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len, 1317d984648eSShiyang Ruan const struct iomap_ops *ops) 1318d984648eSShiyang Ruan { 1319d984648eSShiyang Ruan struct iomap_iter iter = { 1320d984648eSShiyang Ruan .inode = inode, 1321d984648eSShiyang Ruan .pos = pos, 1322d984648eSShiyang Ruan .len = len, 1323d984648eSShiyang Ruan .flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX, 1324d984648eSShiyang Ruan }; 1325d984648eSShiyang Ruan int ret; 1326d984648eSShiyang Ruan 1327d984648eSShiyang Ruan while ((ret = iomap_iter(&iter, ops)) > 0) 1328d984648eSShiyang Ruan iter.processed = dax_unshare_iter(&iter); 1329d984648eSShiyang Ruan return ret; 1330d984648eSShiyang Ruan } 1331d984648eSShiyang Ruan EXPORT_SYMBOL_GPL(dax_file_unshare); 1332d984648eSShiyang Ruan 13338dbfc76dSShiyang Ruan static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size) 1334e5c71954SChristoph Hellwig { 13358dbfc76dSShiyang Ruan const struct iomap *iomap = &iter->iomap; 13368dbfc76dSShiyang Ruan const struct iomap *srcmap = iomap_iter_srcmap(iter); 13378dbfc76dSShiyang Ruan unsigned offset = offset_in_page(pos); 13388dbfc76dSShiyang Ruan pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1339e5c71954SChristoph Hellwig void *kaddr; 1340e5c71954SChristoph Hellwig long ret; 1341e5c71954SChristoph Hellwig 13428dbfc76dSShiyang Ruan ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, 13438dbfc76dSShiyang Ruan NULL); 13448dbfc76dSShiyang Ruan if (ret < 0) 13458dbfc76dSShiyang Ruan return ret; 1346e5c71954SChristoph Hellwig memset(kaddr + offset, 0, size); 1347708dfad2SShiyang Ruan if (iomap->flags & IOMAP_F_SHARED) 1348708dfad2SShiyang Ruan ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap, 13498dbfc76dSShiyang Ruan kaddr); 1350708dfad2SShiyang Ruan else 13518dbfc76dSShiyang Ruan dax_flush(iomap->dax_dev, kaddr + offset, size); 1352e5c71954SChristoph Hellwig return ret; 1353e5c71954SChristoph Hellwig } 1354e5c71954SChristoph Hellwig 1355c6f40468SChristoph Hellwig static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero) 1356679c8bd3SChristoph Hellwig { 1357c6f40468SChristoph Hellwig const struct iomap *iomap = &iter->iomap; 1358c6f40468SChristoph Hellwig const struct iomap *srcmap = iomap_iter_srcmap(iter); 1359c6f40468SChristoph Hellwig loff_t pos = iter->pos; 1360c6f40468SChristoph Hellwig u64 length = iomap_length(iter); 1361c6f40468SChristoph Hellwig s64 written = 0; 1362c6f40468SChristoph Hellwig 1363c6f40468SChristoph Hellwig /* already zeroed? we're done. */ 1364c6f40468SChristoph Hellwig if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) 1365c6f40468SChristoph Hellwig return length; 1366c6f40468SChristoph Hellwig 1367f80e1668SShiyang Ruan /* 1368f80e1668SShiyang Ruan * invalidate the pages whose sharing state is to be changed 1369f80e1668SShiyang Ruan * because of CoW. 
1370f80e1668SShiyang Ruan */ 1371f80e1668SShiyang Ruan if (iomap->flags & IOMAP_F_SHARED) 1372f80e1668SShiyang Ruan invalidate_inode_pages2_range(iter->inode->i_mapping, 1373f80e1668SShiyang Ruan pos >> PAGE_SHIFT, 1374f80e1668SShiyang Ruan (pos + length - 1) >> PAGE_SHIFT); 1375f80e1668SShiyang Ruan 1376c6f40468SChristoph Hellwig do { 137781ee8e52SMatthew Wilcox (Oracle) unsigned offset = offset_in_page(pos); 137881ee8e52SMatthew Wilcox (Oracle) unsigned size = min_t(u64, PAGE_SIZE - offset, length); 1379c6f40468SChristoph Hellwig pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1380c6f40468SChristoph Hellwig long rc; 1381c6f40468SChristoph Hellwig int id; 13820a23f9ffSVivek Goyal 1383cccbce67SDan Williams id = dax_read_lock(); 1384e5c71954SChristoph Hellwig if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE) 138581ee8e52SMatthew Wilcox (Oracle) rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1); 13860a23f9ffSVivek Goyal else 13878dbfc76dSShiyang Ruan rc = dax_memzero(iter, pos, size); 1388cccbce67SDan Williams dax_read_unlock(id); 13890a23f9ffSVivek Goyal 1390e5c71954SChristoph Hellwig if (rc < 0) 1391e5c71954SChristoph Hellwig return rc; 1392c6f40468SChristoph Hellwig pos += size; 1393c6f40468SChristoph Hellwig length -= size; 1394c6f40468SChristoph Hellwig written += size; 1395c6f40468SChristoph Hellwig } while (length > 0); 1396c6f40468SChristoph Hellwig 1397f8189d5dSKaixu Xia if (did_zero) 1398f8189d5dSKaixu Xia *did_zero = true; 1399c6f40468SChristoph Hellwig return written; 1400679c8bd3SChristoph Hellwig } 1401679c8bd3SChristoph Hellwig 1402c6f40468SChristoph Hellwig int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, 1403c6f40468SChristoph Hellwig const struct iomap_ops *ops) 1404c6f40468SChristoph Hellwig { 1405c6f40468SChristoph Hellwig struct iomap_iter iter = { 1406c6f40468SChristoph Hellwig .inode = inode, 1407c6f40468SChristoph Hellwig .pos = pos, 1408c6f40468SChristoph Hellwig .len = len, 1409952da063SChristoph Hellwig .flags = IOMAP_DAX | IOMAP_ZERO, 1410c6f40468SChristoph Hellwig }; 1411c6f40468SChristoph Hellwig int ret; 1412c6f40468SChristoph Hellwig 1413c6f40468SChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0) 1414c6f40468SChristoph Hellwig iter.processed = dax_zero_iter(&iter, did_zero); 1415c6f40468SChristoph Hellwig return ret; 1416c6f40468SChristoph Hellwig } 1417c6f40468SChristoph Hellwig EXPORT_SYMBOL_GPL(dax_zero_range); 1418c6f40468SChristoph Hellwig 1419c6f40468SChristoph Hellwig int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 1420c6f40468SChristoph Hellwig const struct iomap_ops *ops) 1421c6f40468SChristoph Hellwig { 1422c6f40468SChristoph Hellwig unsigned int blocksize = i_blocksize(inode); 1423c6f40468SChristoph Hellwig unsigned int off = pos & (blocksize - 1); 1424c6f40468SChristoph Hellwig 1425c6f40468SChristoph Hellwig /* Block boundary? 
Nothing to do */ 1426c6f40468SChristoph Hellwig if (!off) 1427c6f40468SChristoph Hellwig return 0; 1428c6f40468SChristoph Hellwig return dax_zero_range(inode, pos, blocksize - off, did_zero, ops); 1429c6f40468SChristoph Hellwig } 1430c6f40468SChristoph Hellwig EXPORT_SYMBOL_GPL(dax_truncate_page); 1431c6f40468SChristoph Hellwig 1432ca289e0bSChristoph Hellwig static loff_t dax_iomap_iter(const struct iomap_iter *iomi, 1433ca289e0bSChristoph Hellwig struct iov_iter *iter) 1434a254e568SChristoph Hellwig { 1435ca289e0bSChristoph Hellwig const struct iomap *iomap = &iomi->iomap; 1436f80e1668SShiyang Ruan const struct iomap *srcmap = iomap_iter_srcmap(iomi); 1437ca289e0bSChristoph Hellwig loff_t length = iomap_length(iomi); 1438ca289e0bSChristoph Hellwig loff_t pos = iomi->pos; 1439cccbce67SDan Williams struct dax_device *dax_dev = iomap->dax_dev; 1440a254e568SChristoph Hellwig loff_t end = pos + length, done = 0; 1441ff17b8dfSShiyang Ruan bool write = iov_iter_rw(iter) == WRITE; 1442f80e1668SShiyang Ruan bool cow = write && iomap->flags & IOMAP_F_SHARED; 1443a254e568SChristoph Hellwig ssize_t ret = 0; 1444a77d4786SDan Williams size_t xfer; 1445cccbce67SDan Williams int id; 1446a254e568SChristoph Hellwig 1447ff17b8dfSShiyang Ruan if (!write) { 1448ca289e0bSChristoph Hellwig end = min(end, i_size_read(iomi->inode)); 1449a254e568SChristoph Hellwig if (pos >= end) 1450a254e568SChristoph Hellwig return 0; 1451a254e568SChristoph Hellwig 1452a254e568SChristoph Hellwig if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) 1453a254e568SChristoph Hellwig return iov_iter_zero(min(length, end - pos), iter); 1454a254e568SChristoph Hellwig } 1455a254e568SChristoph Hellwig 1456ff17b8dfSShiyang Ruan /* 1457ff17b8dfSShiyang Ruan * In DAX mode, enforce either pure overwrites of written extents, or 1458ff17b8dfSShiyang Ruan * writes to unwritten extents as part of a copy-on-write operation. 1459ff17b8dfSShiyang Ruan */ 1460ff17b8dfSShiyang Ruan if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED && 1461ff17b8dfSShiyang Ruan !(iomap->flags & IOMAP_F_SHARED))) 1462a254e568SChristoph Hellwig return -EIO; 1463a254e568SChristoph Hellwig 1464e3fce68cSJan Kara /* 1465e3fce68cSJan Kara * Write can allocate block for an area which has a hole page mapped 1466e3fce68cSJan Kara * into page tables. We have to tear down these mappings so that data 1467e3fce68cSJan Kara * written by write(2) is visible in mmap. 1468e3fce68cSJan Kara */ 1469f80e1668SShiyang Ruan if (iomap->flags & IOMAP_F_NEW || cow) { 1470*f76b3a32SShiyang Ruan /* 1471*f76b3a32SShiyang Ruan * Filesystem allows CoW on non-shared extents. The src extents 1472*f76b3a32SShiyang Ruan * may have been mmapped with dirty mark before. To be able to 1473*f76b3a32SShiyang Ruan * invalidate its dax entries, we need to clear the dirty mark 1474*f76b3a32SShiyang Ruan * in advance. 
1475*f76b3a32SShiyang Ruan */ 1476*f76b3a32SShiyang Ruan if (cow) 1477*f76b3a32SShiyang Ruan __dax_clear_dirty_range(iomi->inode->i_mapping, 1478*f76b3a32SShiyang Ruan pos >> PAGE_SHIFT, 1479*f76b3a32SShiyang Ruan (end - 1) >> PAGE_SHIFT); 1480ca289e0bSChristoph Hellwig invalidate_inode_pages2_range(iomi->inode->i_mapping, 1481e3fce68cSJan Kara pos >> PAGE_SHIFT, 1482e3fce68cSJan Kara (end - 1) >> PAGE_SHIFT); 1483e3fce68cSJan Kara } 1484e3fce68cSJan Kara 1485cccbce67SDan Williams id = dax_read_lock(); 1486a254e568SChristoph Hellwig while (pos < end) { 1487a254e568SChristoph Hellwig unsigned offset = pos & (PAGE_SIZE - 1); 1488cccbce67SDan Williams const size_t size = ALIGN(length + offset, PAGE_SIZE); 148960696eb2SChristoph Hellwig pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1490a254e568SChristoph Hellwig ssize_t map_len; 1491047218ecSJane Chu bool recovery = false; 1492cccbce67SDan Williams void *kaddr; 1493a254e568SChristoph Hellwig 1494d1908f52SMichal Hocko if (fatal_signal_pending(current)) { 1495d1908f52SMichal Hocko ret = -EINTR; 1496d1908f52SMichal Hocko break; 1497d1908f52SMichal Hocko } 1498d1908f52SMichal Hocko 1499cccbce67SDan Williams map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), 1500e511c4a3SJane Chu DAX_ACCESS, &kaddr, NULL); 1501047218ecSJane Chu if (map_len == -EIO && iov_iter_rw(iter) == WRITE) { 1502047218ecSJane Chu map_len = dax_direct_access(dax_dev, pgoff, 1503047218ecSJane Chu PHYS_PFN(size), DAX_RECOVERY_WRITE, 150486ed913bSHuaisheng Ye &kaddr, NULL); 1505047218ecSJane Chu if (map_len > 0) 1506047218ecSJane Chu recovery = true; 1507047218ecSJane Chu } 1508a254e568SChristoph Hellwig if (map_len < 0) { 1509a254e568SChristoph Hellwig ret = map_len; 1510a254e568SChristoph Hellwig break; 1511a254e568SChristoph Hellwig } 1512a254e568SChristoph Hellwig 1513f80e1668SShiyang Ruan if (cow) { 1514708dfad2SShiyang Ruan ret = dax_iomap_copy_around(pos, length, PAGE_SIZE, 1515708dfad2SShiyang Ruan srcmap, kaddr); 1516ff17b8dfSShiyang Ruan if (ret) 1517ff17b8dfSShiyang Ruan break; 1518ff17b8dfSShiyang Ruan } 1519ff17b8dfSShiyang Ruan 1520cccbce67SDan Williams map_len = PFN_PHYS(map_len); 1521cccbce67SDan Williams kaddr += offset; 1522a254e568SChristoph Hellwig map_len -= offset; 1523a254e568SChristoph Hellwig if (map_len > end - pos) 1524a254e568SChristoph Hellwig map_len = end - pos; 1525a254e568SChristoph Hellwig 1526047218ecSJane Chu if (recovery) 1527047218ecSJane Chu xfer = dax_recovery_write(dax_dev, pgoff, kaddr, 1528047218ecSJane Chu map_len, iter); 1529ff17b8dfSShiyang Ruan else if (write) 1530a77d4786SDan Williams xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, 1531fec53774SDan Williams map_len, iter); 1532a254e568SChristoph Hellwig else 1533a77d4786SDan Williams xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr, 1534b3a9a0c3SDan Williams map_len, iter); 1535a254e568SChristoph Hellwig 1536a77d4786SDan Williams pos += xfer; 1537a77d4786SDan Williams length -= xfer; 1538a77d4786SDan Williams done += xfer; 1539a77d4786SDan Williams 1540a77d4786SDan Williams if (xfer == 0) 1541a77d4786SDan Williams ret = -EFAULT; 1542a77d4786SDan Williams if (xfer < map_len) 1543a77d4786SDan Williams break; 1544a254e568SChristoph Hellwig } 1545cccbce67SDan Williams dax_read_unlock(id); 1546a254e568SChristoph Hellwig 1547a254e568SChristoph Hellwig return done ? 
done : ret; 1548a254e568SChristoph Hellwig } 1549a254e568SChristoph Hellwig 1550a254e568SChristoph Hellwig /** 155111c59c92SRoss Zwisler * dax_iomap_rw - Perform I/O to a DAX file 1552a254e568SChristoph Hellwig * @iocb: The control block for this I/O 1553a254e568SChristoph Hellwig * @iter: The addresses to do I/O from or to 1554a254e568SChristoph Hellwig * @ops: iomap ops passed from the file system 1555a254e568SChristoph Hellwig * 1556a254e568SChristoph Hellwig * This function performs read and write operations to directly mapped 1557a254e568SChristoph Hellwig * persistent memory. The callers needs to take care of read/write exclusion 1558a254e568SChristoph Hellwig * and evicting any page cache pages in the region under I/O. 1559a254e568SChristoph Hellwig */ 1560a254e568SChristoph Hellwig ssize_t 156111c59c92SRoss Zwisler dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, 15628ff6daa1SChristoph Hellwig const struct iomap_ops *ops) 1563a254e568SChristoph Hellwig { 1564ca289e0bSChristoph Hellwig struct iomap_iter iomi = { 1565ca289e0bSChristoph Hellwig .inode = iocb->ki_filp->f_mapping->host, 1566ca289e0bSChristoph Hellwig .pos = iocb->ki_pos, 1567ca289e0bSChristoph Hellwig .len = iov_iter_count(iter), 1568952da063SChristoph Hellwig .flags = IOMAP_DAX, 1569ca289e0bSChristoph Hellwig }; 1570ca289e0bSChristoph Hellwig loff_t done = 0; 1571ca289e0bSChristoph Hellwig int ret; 1572a254e568SChristoph Hellwig 157317d9c15cSLi Jinlin if (!iomi.len) 157417d9c15cSLi Jinlin return 0; 157517d9c15cSLi Jinlin 1576168316dbSChristoph Hellwig if (iov_iter_rw(iter) == WRITE) { 1577ca289e0bSChristoph Hellwig lockdep_assert_held_write(&iomi.inode->i_rwsem); 1578ca289e0bSChristoph Hellwig iomi.flags |= IOMAP_WRITE; 1579168316dbSChristoph Hellwig } else { 1580ca289e0bSChristoph Hellwig lockdep_assert_held(&iomi.inode->i_rwsem); 1581168316dbSChristoph Hellwig } 1582a254e568SChristoph Hellwig 158396222d53SJeff Moyer if (iocb->ki_flags & IOCB_NOWAIT) 1584ca289e0bSChristoph Hellwig iomi.flags |= IOMAP_NOWAIT; 158596222d53SJeff Moyer 1586ca289e0bSChristoph Hellwig while ((ret = iomap_iter(&iomi, ops)) > 0) 1587ca289e0bSChristoph Hellwig iomi.processed = dax_iomap_iter(&iomi, iter); 1588a254e568SChristoph Hellwig 1589ca289e0bSChristoph Hellwig done = iomi.pos - iocb->ki_pos; 1590ca289e0bSChristoph Hellwig iocb->ki_pos = iomi.pos; 1591a254e568SChristoph Hellwig return done ? done : ret; 1592a254e568SChristoph Hellwig } 159311c59c92SRoss Zwisler EXPORT_SYMBOL_GPL(dax_iomap_rw); 1594a7d73fe6SChristoph Hellwig 1595ab77dab4SSouptick Joarder static vm_fault_t dax_fault_return(int error) 15969f141d6eSJan Kara { 15979f141d6eSJan Kara if (error == 0) 15989f141d6eSJan Kara return VM_FAULT_NOPAGE; 1599c9aed74eSSouptick Joarder return vmf_error(error); 16009f141d6eSJan Kara } 16019f141d6eSJan Kara 1602aaa422c4SDan Williams /* 160355f81639SShiyang Ruan * When handling a synchronous page fault and the inode need a fsync, we can 160455f81639SShiyang Ruan * insert the PTE/PMD into page tables only after that fsync happened. Skip 160555f81639SShiyang Ruan * insertion for now and return the pfn so that caller can insert it after the 160655f81639SShiyang Ruan * fsync is done. 
160755f81639SShiyang Ruan */ 160855f81639SShiyang Ruan static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn) 160955f81639SShiyang Ruan { 161055f81639SShiyang Ruan if (WARN_ON_ONCE(!pfnp)) 161155f81639SShiyang Ruan return VM_FAULT_SIGBUS; 161255f81639SShiyang Ruan *pfnp = pfn; 161355f81639SShiyang Ruan return VM_FAULT_NEEDDSYNC; 161455f81639SShiyang Ruan } 161555f81639SShiyang Ruan 161665dd814aSChristoph Hellwig static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, 161765dd814aSChristoph Hellwig const struct iomap_iter *iter) 161855f81639SShiyang Ruan { 161955f81639SShiyang Ruan vm_fault_t ret; 162055f81639SShiyang Ruan int error = 0; 162155f81639SShiyang Ruan 162265dd814aSChristoph Hellwig switch (iter->iomap.type) { 162355f81639SShiyang Ruan case IOMAP_HOLE: 162455f81639SShiyang Ruan case IOMAP_UNWRITTEN: 1625429f8de7SChristoph Hellwig clear_user_highpage(vmf->cow_page, vmf->address); 162655f81639SShiyang Ruan break; 162755f81639SShiyang Ruan case IOMAP_MAPPED: 1628429f8de7SChristoph Hellwig error = copy_cow_page_dax(vmf, iter); 162955f81639SShiyang Ruan break; 163055f81639SShiyang Ruan default: 163155f81639SShiyang Ruan WARN_ON_ONCE(1); 163255f81639SShiyang Ruan error = -EIO; 163355f81639SShiyang Ruan break; 163455f81639SShiyang Ruan } 163555f81639SShiyang Ruan 163655f81639SShiyang Ruan if (error) 163755f81639SShiyang Ruan return dax_fault_return(error); 163855f81639SShiyang Ruan 163955f81639SShiyang Ruan __SetPageUptodate(vmf->cow_page); 164055f81639SShiyang Ruan ret = finish_fault(vmf); 164155f81639SShiyang Ruan if (!ret) 164255f81639SShiyang Ruan return VM_FAULT_DONE_COW; 164355f81639SShiyang Ruan return ret; 164455f81639SShiyang Ruan } 164555f81639SShiyang Ruan 1646c2436190SShiyang Ruan /** 164765dd814aSChristoph Hellwig * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault. 1648c2436190SShiyang Ruan * @vmf: vm fault instance 164965dd814aSChristoph Hellwig * @iter: iomap iter 1650c2436190SShiyang Ruan * @pfnp: pfn to be returned 1651c2436190SShiyang Ruan * @xas: the dax mapping tree of a file 1652c2436190SShiyang Ruan * @entry: an unlocked dax entry to be inserted 1653c2436190SShiyang Ruan * @pmd: distinguish whether it is a pmd fault 1654c2436190SShiyang Ruan */ 165565dd814aSChristoph Hellwig static vm_fault_t dax_fault_iter(struct vm_fault *vmf, 165665dd814aSChristoph Hellwig const struct iomap_iter *iter, pfn_t *pfnp, 165765dd814aSChristoph Hellwig struct xa_state *xas, void **entry, bool pmd) 1658c2436190SShiyang Ruan { 165965dd814aSChristoph Hellwig const struct iomap *iomap = &iter->iomap; 1660708dfad2SShiyang Ruan const struct iomap *srcmap = iomap_iter_srcmap(iter); 1661c2436190SShiyang Ruan size_t size = pmd ? PMD_SIZE : PAGE_SIZE; 1662c2436190SShiyang Ruan loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; 1663e5d6df73SShiyang Ruan bool write = iter->flags & IOMAP_WRITE; 1664c2436190SShiyang Ruan unsigned long entry_flags = pmd ? DAX_PMD : 0; 1665c2436190SShiyang Ruan int err = 0; 1666c2436190SShiyang Ruan pfn_t pfn; 1667ff17b8dfSShiyang Ruan void *kaddr; 1668c2436190SShiyang Ruan 166965dd814aSChristoph Hellwig if (!pmd && vmf->cow_page) 167065dd814aSChristoph Hellwig return dax_fault_cow_page(vmf, iter); 167165dd814aSChristoph Hellwig 1672c2436190SShiyang Ruan /* if we are reading UNWRITTEN and HOLE, return a hole. 
*/ 1673c2436190SShiyang Ruan if (!write && 1674c2436190SShiyang Ruan (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) { 1675c2436190SShiyang Ruan if (!pmd) 1676e5d6df73SShiyang Ruan return dax_load_hole(xas, vmf, iter, entry); 1677e5d6df73SShiyang Ruan return dax_pmd_load_hole(xas, vmf, iter, entry); 1678c2436190SShiyang Ruan } 1679c2436190SShiyang Ruan 1680ff17b8dfSShiyang Ruan if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) { 1681c2436190SShiyang Ruan WARN_ON_ONCE(1); 1682c2436190SShiyang Ruan return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS; 1683c2436190SShiyang Ruan } 1684c2436190SShiyang Ruan 1685ff17b8dfSShiyang Ruan err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn); 1686c2436190SShiyang Ruan if (err) 1687c2436190SShiyang Ruan return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err); 1688c2436190SShiyang Ruan 1689e5d6df73SShiyang Ruan *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); 1690c2436190SShiyang Ruan 1691708dfad2SShiyang Ruan if (write && iomap->flags & IOMAP_F_SHARED) { 1692708dfad2SShiyang Ruan err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr); 1693ff17b8dfSShiyang Ruan if (err) 1694ff17b8dfSShiyang Ruan return dax_fault_return(err); 1695ff17b8dfSShiyang Ruan } 1696ff17b8dfSShiyang Ruan 1697e5d6df73SShiyang Ruan if (dax_fault_is_synchronous(iter, vmf->vma)) 1698c2436190SShiyang Ruan return dax_fault_synchronous_pfnp(pfnp, pfn); 1699c2436190SShiyang Ruan 1700c2436190SShiyang Ruan /* insert PMD pfn */ 1701c2436190SShiyang Ruan if (pmd) 1702c2436190SShiyang Ruan return vmf_insert_pfn_pmd(vmf, pfn, write); 1703c2436190SShiyang Ruan 1704c2436190SShiyang Ruan /* insert PTE pfn */ 1705c2436190SShiyang Ruan if (write) 1706c2436190SShiyang Ruan return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 1707c2436190SShiyang Ruan return vmf_insert_mixed(vmf->vma, vmf->address, pfn); 1708c2436190SShiyang Ruan } 1709c2436190SShiyang Ruan 1710ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, 1711c0b24625SJan Kara int *iomap_errp, const struct iomap_ops *ops) 1712a7d73fe6SChristoph Hellwig { 171365dd814aSChristoph Hellwig struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1714b15cd800SMatthew Wilcox XA_STATE(xas, &mapping->i_pages, vmf->pgoff); 171565dd814aSChristoph Hellwig struct iomap_iter iter = { 171665dd814aSChristoph Hellwig .inode = mapping->host, 171765dd814aSChristoph Hellwig .pos = (loff_t)vmf->pgoff << PAGE_SHIFT, 171865dd814aSChristoph Hellwig .len = PAGE_SIZE, 1719952da063SChristoph Hellwig .flags = IOMAP_DAX | IOMAP_FAULT, 172065dd814aSChristoph Hellwig }; 1721ab77dab4SSouptick Joarder vm_fault_t ret = 0; 1722a7d73fe6SChristoph Hellwig void *entry; 172365dd814aSChristoph Hellwig int error; 1724a7d73fe6SChristoph Hellwig 172565dd814aSChristoph Hellwig trace_dax_pte_fault(iter.inode, vmf, ret); 1726a7d73fe6SChristoph Hellwig /* 1727a7d73fe6SChristoph Hellwig * Check whether offset isn't beyond end of file now. Caller is supposed 1728a7d73fe6SChristoph Hellwig * to hold locks serializing us with truncate / punch hole so this is 1729a7d73fe6SChristoph Hellwig * a reliable test. 
1730a7d73fe6SChristoph Hellwig */ 173165dd814aSChristoph Hellwig if (iter.pos >= i_size_read(iter.inode)) { 1732ab77dab4SSouptick Joarder ret = VM_FAULT_SIGBUS; 1733a9c42b33SRoss Zwisler goto out; 1734a9c42b33SRoss Zwisler } 1735a7d73fe6SChristoph Hellwig 173665dd814aSChristoph Hellwig if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) 173765dd814aSChristoph Hellwig iter.flags |= IOMAP_WRITE; 1738a7d73fe6SChristoph Hellwig 1739b15cd800SMatthew Wilcox entry = grab_mapping_entry(&xas, mapping, 0); 1740b15cd800SMatthew Wilcox if (xa_is_internal(entry)) { 1741b15cd800SMatthew Wilcox ret = xa_to_internal(entry); 174213e451fdSJan Kara goto out; 174313e451fdSJan Kara } 174413e451fdSJan Kara 1745a7d73fe6SChristoph Hellwig /* 1746e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1747e2093926SRoss Zwisler * mappings, that we have raced with a PMD fault that overlaps with 1748e2093926SRoss Zwisler * the PTE we need to set up. If so just return and the fault will be 1749e2093926SRoss Zwisler * retried. 1750e2093926SRoss Zwisler */ 1751e2093926SRoss Zwisler if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { 1752ab77dab4SSouptick Joarder ret = VM_FAULT_NOPAGE; 1753e2093926SRoss Zwisler goto unlock_entry; 1754e2093926SRoss Zwisler } 1755e2093926SRoss Zwisler 175665dd814aSChristoph Hellwig while ((error = iomap_iter(&iter, ops)) > 0) { 175765dd814aSChristoph Hellwig if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) { 175865dd814aSChristoph Hellwig iter.processed = -EIO; /* fs corruption? */ 175965dd814aSChristoph Hellwig continue; 176065dd814aSChristoph Hellwig } 176165dd814aSChristoph Hellwig 176265dd814aSChristoph Hellwig ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); 176365dd814aSChristoph Hellwig if (ret != VM_FAULT_SIGBUS && 176465dd814aSChristoph Hellwig (iter.iomap.flags & IOMAP_F_NEW)) { 176565dd814aSChristoph Hellwig count_vm_event(PGMAJFAULT); 176665dd814aSChristoph Hellwig count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); 176765dd814aSChristoph Hellwig ret |= VM_FAULT_MAJOR; 176865dd814aSChristoph Hellwig } 176965dd814aSChristoph Hellwig 177065dd814aSChristoph Hellwig if (!(ret & VM_FAULT_ERROR)) 177165dd814aSChristoph Hellwig iter.processed = PAGE_SIZE; 177265dd814aSChristoph Hellwig } 177365dd814aSChristoph Hellwig 1774c0b24625SJan Kara if (iomap_errp) 1775c0b24625SJan Kara *iomap_errp = error; 177665dd814aSChristoph Hellwig if (!ret && error) 1777ab77dab4SSouptick Joarder ret = dax_fault_return(error); 1778a7d73fe6SChristoph Hellwig 177913e451fdSJan Kara unlock_entry: 1780b15cd800SMatthew Wilcox dax_unlock_entry(&xas, entry); 1781a9c42b33SRoss Zwisler out: 178265dd814aSChristoph Hellwig trace_dax_pte_fault_done(iter.inode, vmf, ret); 178365dd814aSChristoph Hellwig return ret; 1784a7d73fe6SChristoph Hellwig } 1785642261acSRoss Zwisler 1786642261acSRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 178755f81639SShiyang Ruan static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, 178855f81639SShiyang Ruan pgoff_t max_pgoff) 1789642261acSRoss Zwisler { 1790d8a849e1SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1791d8a849e1SDave Jiang bool write = vmf->flags & FAULT_FLAG_WRITE; 1792282a8e03SRoss Zwisler 1793fffa281bSRoss Zwisler /* 1794fffa281bSRoss Zwisler * Make sure that the faulting address's PMD offset (color) matches 1795fffa281bSRoss Zwisler * the PMD offset from the start of the file. 
This is necessary so 1796fffa281bSRoss Zwisler * that a PMD range in the page table overlaps exactly with a PMD 1797a77d19f4SMatthew Wilcox * range in the page cache. 1798fffa281bSRoss Zwisler */ 1799fffa281bSRoss Zwisler if ((vmf->pgoff & PG_PMD_COLOUR) != 1800fffa281bSRoss Zwisler ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) 180155f81639SShiyang Ruan return true; 1802fffa281bSRoss Zwisler 1803642261acSRoss Zwisler /* Fall back to PTEs if we're going to COW */ 180455f81639SShiyang Ruan if (write && !(vmf->vma->vm_flags & VM_SHARED)) 180555f81639SShiyang Ruan return true; 1806642261acSRoss Zwisler 1807642261acSRoss Zwisler /* If the PMD would extend outside the VMA */ 180855f81639SShiyang Ruan if (pmd_addr < vmf->vma->vm_start) 180955f81639SShiyang Ruan return true; 181055f81639SShiyang Ruan if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end) 181155f81639SShiyang Ruan return true; 181255f81639SShiyang Ruan 181355f81639SShiyang Ruan /* If the PMD would extend beyond the file size */ 181455f81639SShiyang Ruan if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff) 181555f81639SShiyang Ruan return true; 181655f81639SShiyang Ruan 181755f81639SShiyang Ruan return false; 181855f81639SShiyang Ruan } 181955f81639SShiyang Ruan 1820642261acSRoss Zwisler static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 1821642261acSRoss Zwisler const struct iomap_ops *ops) 1822642261acSRoss Zwisler { 182365dd814aSChristoph Hellwig struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1824642261acSRoss Zwisler XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); 182565dd814aSChristoph Hellwig struct iomap_iter iter = { 182665dd814aSChristoph Hellwig .inode = mapping->host, 182765dd814aSChristoph Hellwig .len = PMD_SIZE, 1828952da063SChristoph Hellwig .flags = IOMAP_DAX | IOMAP_FAULT, 182965dd814aSChristoph Hellwig }; 1830c2436190SShiyang Ruan vm_fault_t ret = VM_FAULT_FALLBACK; 1831642261acSRoss Zwisler pgoff_t max_pgoff; 1832642261acSRoss Zwisler void *entry; 1833642261acSRoss Zwisler int error; 1834642261acSRoss Zwisler 183565dd814aSChristoph Hellwig if (vmf->flags & FAULT_FLAG_WRITE) 183665dd814aSChristoph Hellwig iter.flags |= IOMAP_WRITE; 183765dd814aSChristoph Hellwig 1838642261acSRoss Zwisler /* 1839642261acSRoss Zwisler * Check whether offset isn't beyond end of file now. Caller is 1840642261acSRoss Zwisler * supposed to hold locks serializing us with truncate / punch hole so 1841642261acSRoss Zwisler * this is a reliable test. 1842642261acSRoss Zwisler */ 184365dd814aSChristoph Hellwig max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE); 1844642261acSRoss Zwisler 184565dd814aSChristoph Hellwig trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); 1846642261acSRoss Zwisler 1847b15cd800SMatthew Wilcox if (xas.xa_index >= max_pgoff) { 1848c2436190SShiyang Ruan ret = VM_FAULT_SIGBUS; 1849282a8e03SRoss Zwisler goto out; 1850282a8e03SRoss Zwisler } 1851642261acSRoss Zwisler 185255f81639SShiyang Ruan if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) 1853642261acSRoss Zwisler goto fallback; 1854642261acSRoss Zwisler 1855642261acSRoss Zwisler /* 1856b15cd800SMatthew Wilcox * grab_mapping_entry() will make sure we get an empty PMD entry, 1857b15cd800SMatthew Wilcox * a zero PMD entry or a DAX PMD. If it can't (because a PTE 1858b15cd800SMatthew Wilcox * entry is already in the array, for instance), it will return 1859b15cd800SMatthew Wilcox * VM_FAULT_FALLBACK. 
18609f141d6eSJan Kara */ 186123c84eb7SMatthew Wilcox (Oracle) entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); 1862b15cd800SMatthew Wilcox if (xa_is_internal(entry)) { 1863c2436190SShiyang Ruan ret = xa_to_internal(entry); 1864876f2946SRoss Zwisler goto fallback; 1865b15cd800SMatthew Wilcox } 1866876f2946SRoss Zwisler 1867876f2946SRoss Zwisler /* 1868e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1869e2093926SRoss Zwisler * mappings, that we have raced with a PTE fault that overlaps with 1870e2093926SRoss Zwisler * the PMD we need to set up. If so just return and the fault will be 1871e2093926SRoss Zwisler * retried. 1872e2093926SRoss Zwisler */ 1873e2093926SRoss Zwisler if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && 1874e2093926SRoss Zwisler !pmd_devmap(*vmf->pmd)) { 1875c2436190SShiyang Ruan ret = 0; 1876e2093926SRoss Zwisler goto unlock_entry; 1877e2093926SRoss Zwisler } 1878e2093926SRoss Zwisler 187965dd814aSChristoph Hellwig iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT; 188065dd814aSChristoph Hellwig while ((error = iomap_iter(&iter, ops)) > 0) { 188165dd814aSChristoph Hellwig if (iomap_length(&iter) < PMD_SIZE) 188265dd814aSChristoph Hellwig continue; /* actually breaks out of the loop */ 1883876f2946SRoss Zwisler 188465dd814aSChristoph Hellwig ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); 188565dd814aSChristoph Hellwig if (ret != VM_FAULT_FALLBACK) 188665dd814aSChristoph Hellwig iter.processed = PMD_SIZE; 1887caa51d26SJan Kara } 1888caa51d26SJan Kara 1889876f2946SRoss Zwisler unlock_entry: 1890b15cd800SMatthew Wilcox dax_unlock_entry(&xas, entry); 1891642261acSRoss Zwisler fallback: 1892c2436190SShiyang Ruan if (ret == VM_FAULT_FALLBACK) { 189365dd814aSChristoph Hellwig split_huge_pmd(vmf->vma, vmf->pmd, vmf->address); 1894642261acSRoss Zwisler count_vm_event(THP_FAULT_FALLBACK); 1895642261acSRoss Zwisler } 1896282a8e03SRoss Zwisler out: 189765dd814aSChristoph Hellwig trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret); 1898c2436190SShiyang Ruan return ret; 1899642261acSRoss Zwisler } 1900a2d58167SDave Jiang #else 1901ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 190201cddfe9SArnd Bergmann const struct iomap_ops *ops) 1903a2d58167SDave Jiang { 1904a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1905a2d58167SDave Jiang } 1906642261acSRoss Zwisler #endif /* CONFIG_FS_DAX_PMD */ 1907a2d58167SDave Jiang 1908a2d58167SDave Jiang /** 1909a2d58167SDave Jiang * dax_iomap_fault - handle a page fault on a DAX file 1910a2d58167SDave Jiang * @vmf: The description of the fault 1911cec04e8cSJan Kara * @pe_size: Size of the page to fault in 19129a0dd422SJan Kara * @pfnp: PFN to insert for synchronous faults if fsync is required 1913c0b24625SJan Kara * @iomap_errp: Storage for detailed error code in case of error 1914cec04e8cSJan Kara * @ops: Iomap ops passed from the file system 1915a2d58167SDave Jiang * 1916a2d58167SDave Jiang * When a page fault occurs, filesystems may call this helper in 1917a2d58167SDave Jiang * their fault handler for DAX files. dax_iomap_fault() assumes the caller 1918a2d58167SDave Jiang * has done all the necessary locking for page fault to proceed 1919a2d58167SDave Jiang * successfully. 
1920a2d58167SDave Jiang */ 1921ab77dab4SSouptick Joarder vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 1922c0b24625SJan Kara pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) 1923a2d58167SDave Jiang { 1924c791ace1SDave Jiang switch (pe_size) { 1925c791ace1SDave Jiang case PE_SIZE_PTE: 1926c0b24625SJan Kara return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); 1927c791ace1SDave Jiang case PE_SIZE_PMD: 19289a0dd422SJan Kara return dax_iomap_pmd_fault(vmf, pfnp, ops); 1929a2d58167SDave Jiang default: 1930a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1931a2d58167SDave Jiang } 1932a2d58167SDave Jiang } 1933a2d58167SDave Jiang EXPORT_SYMBOL_GPL(dax_iomap_fault); 193471eab6dfSJan Kara 1935a77d19f4SMatthew Wilcox /* 193671eab6dfSJan Kara * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables 193771eab6dfSJan Kara * @vmf: The description of the fault 193871eab6dfSJan Kara * @pfn: PFN to insert 1939cfc93c6cSMatthew Wilcox * @order: Order of entry to insert. 194071eab6dfSJan Kara * 1941a77d19f4SMatthew Wilcox * This function inserts a writeable PTE or PMD entry into the page tables 1942a77d19f4SMatthew Wilcox * for an mmaped DAX file. It also marks the page cache entry as dirty. 194371eab6dfSJan Kara */ 1944cfc93c6cSMatthew Wilcox static vm_fault_t 1945cfc93c6cSMatthew Wilcox dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) 194671eab6dfSJan Kara { 194771eab6dfSJan Kara struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1948cfc93c6cSMatthew Wilcox XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); 1949cfc93c6cSMatthew Wilcox void *entry; 1950ab77dab4SSouptick Joarder vm_fault_t ret; 195171eab6dfSJan Kara 1952cfc93c6cSMatthew Wilcox xas_lock_irq(&xas); 195323c84eb7SMatthew Wilcox (Oracle) entry = get_unlocked_entry(&xas, order); 195471eab6dfSJan Kara /* Did we race with someone splitting entry or so? 
*/ 195523c84eb7SMatthew Wilcox (Oracle) if (!entry || dax_is_conflict(entry) || 195623c84eb7SMatthew Wilcox (Oracle) (order == 0 && !dax_is_pte_entry(entry))) { 19574c3d043dSVivek Goyal put_unlocked_entry(&xas, entry, WAKE_NEXT); 1958cfc93c6cSMatthew Wilcox xas_unlock_irq(&xas); 195971eab6dfSJan Kara trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, 196071eab6dfSJan Kara VM_FAULT_NOPAGE); 196171eab6dfSJan Kara return VM_FAULT_NOPAGE; 196271eab6dfSJan Kara } 1963cfc93c6cSMatthew Wilcox xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); 1964cfc93c6cSMatthew Wilcox dax_lock_entry(&xas, entry); 1965cfc93c6cSMatthew Wilcox xas_unlock_irq(&xas); 1966cfc93c6cSMatthew Wilcox if (order == 0) 1967ab77dab4SSouptick Joarder ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 196871eab6dfSJan Kara #ifdef CONFIG_FS_DAX_PMD 1969cfc93c6cSMatthew Wilcox else if (order == PMD_ORDER) 1970fce86ff5SDan Williams ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); 197171eab6dfSJan Kara #endif 1972cfc93c6cSMatthew Wilcox else 1973ab77dab4SSouptick Joarder ret = VM_FAULT_FALLBACK; 1974cfc93c6cSMatthew Wilcox dax_unlock_entry(&xas, entry); 1975ab77dab4SSouptick Joarder trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); 1976ab77dab4SSouptick Joarder return ret; 197771eab6dfSJan Kara } 197871eab6dfSJan Kara 197971eab6dfSJan Kara /** 198071eab6dfSJan Kara * dax_finish_sync_fault - finish synchronous page fault 198171eab6dfSJan Kara * @vmf: The description of the fault 198271eab6dfSJan Kara * @pe_size: Size of entry to be inserted 198371eab6dfSJan Kara * @pfn: PFN to insert 198471eab6dfSJan Kara * 198571eab6dfSJan Kara * This function ensures that the file range touched by the page fault is 198671eab6dfSJan Kara * stored persistently on the media and handles inserting of appropriate page 198771eab6dfSJan Kara * table entry. 
198871eab6dfSJan Kara */ 1989ab77dab4SSouptick Joarder vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, 1990ab77dab4SSouptick Joarder enum page_entry_size pe_size, pfn_t pfn) 199171eab6dfSJan Kara { 199271eab6dfSJan Kara int err; 199371eab6dfSJan Kara loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; 1994cfc93c6cSMatthew Wilcox unsigned int order = pe_order(pe_size); 1995cfc93c6cSMatthew Wilcox size_t len = PAGE_SIZE << order; 199671eab6dfSJan Kara 199771eab6dfSJan Kara err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); 199871eab6dfSJan Kara if (err) 199971eab6dfSJan Kara return VM_FAULT_SIGBUS; 2000cfc93c6cSMatthew Wilcox return dax_insert_pfn_mkwrite(vmf, pfn, order); 200171eab6dfSJan Kara } 200271eab6dfSJan Kara EXPORT_SYMBOL_GPL(dax_finish_sync_fault); 20036f7db389SShiyang Ruan 20046f7db389SShiyang Ruan static loff_t dax_range_compare_iter(struct iomap_iter *it_src, 20056f7db389SShiyang Ruan struct iomap_iter *it_dest, u64 len, bool *same) 20066f7db389SShiyang Ruan { 20076f7db389SShiyang Ruan const struct iomap *smap = &it_src->iomap; 20086f7db389SShiyang Ruan const struct iomap *dmap = &it_dest->iomap; 20096f7db389SShiyang Ruan loff_t pos1 = it_src->pos, pos2 = it_dest->pos; 20106f7db389SShiyang Ruan void *saddr, *daddr; 20116f7db389SShiyang Ruan int id, ret; 20126f7db389SShiyang Ruan 20136f7db389SShiyang Ruan len = min(len, min(smap->length, dmap->length)); 20146f7db389SShiyang Ruan 20156f7db389SShiyang Ruan if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) { 20166f7db389SShiyang Ruan *same = true; 20176f7db389SShiyang Ruan return len; 20186f7db389SShiyang Ruan } 20196f7db389SShiyang Ruan 20206f7db389SShiyang Ruan if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) { 20216f7db389SShiyang Ruan *same = false; 20226f7db389SShiyang Ruan return 0; 20236f7db389SShiyang Ruan } 20246f7db389SShiyang Ruan 20256f7db389SShiyang Ruan id = dax_read_lock(); 20266f7db389SShiyang Ruan ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE), 20276f7db389SShiyang Ruan &saddr, NULL); 20286f7db389SShiyang Ruan if (ret < 0) 20296f7db389SShiyang Ruan goto out_unlock; 20306f7db389SShiyang Ruan 20316f7db389SShiyang Ruan ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE), 20326f7db389SShiyang Ruan &daddr, NULL); 20336f7db389SShiyang Ruan if (ret < 0) 20346f7db389SShiyang Ruan goto out_unlock; 20356f7db389SShiyang Ruan 20366f7db389SShiyang Ruan *same = !memcmp(saddr, daddr, len); 20376f7db389SShiyang Ruan if (!*same) 20386f7db389SShiyang Ruan len = 0; 20396f7db389SShiyang Ruan dax_read_unlock(id); 20406f7db389SShiyang Ruan return len; 20416f7db389SShiyang Ruan 20426f7db389SShiyang Ruan out_unlock: 20436f7db389SShiyang Ruan dax_read_unlock(id); 20446f7db389SShiyang Ruan return -EIO; 20456f7db389SShiyang Ruan } 20466f7db389SShiyang Ruan 20476f7db389SShiyang Ruan int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, 20486f7db389SShiyang Ruan struct inode *dst, loff_t dstoff, loff_t len, bool *same, 20496f7db389SShiyang Ruan const struct iomap_ops *ops) 20506f7db389SShiyang Ruan { 20516f7db389SShiyang Ruan struct iomap_iter src_iter = { 20526f7db389SShiyang Ruan .inode = src, 20536f7db389SShiyang Ruan .pos = srcoff, 20546f7db389SShiyang Ruan .len = len, 20556f7db389SShiyang Ruan .flags = IOMAP_DAX, 20566f7db389SShiyang Ruan }; 20576f7db389SShiyang Ruan struct iomap_iter dst_iter = { 20586f7db389SShiyang Ruan .inode = dst, 20596f7db389SShiyang Ruan .pos = dstoff, 20606f7db389SShiyang Ruan .len = len, 20616f7db389SShiyang Ruan 
.flags = IOMAP_DAX, 20626f7db389SShiyang Ruan }; 20630e79e373SShiyang Ruan int ret, compared = 0; 20646f7db389SShiyang Ruan 20650e79e373SShiyang Ruan while ((ret = iomap_iter(&src_iter, ops)) > 0 && 20660e79e373SShiyang Ruan (ret = iomap_iter(&dst_iter, ops)) > 0) { 2067e900ba10SShiyang Ruan compared = dax_range_compare_iter(&src_iter, &dst_iter, 2068e900ba10SShiyang Ruan min(src_iter.len, dst_iter.len), same); 20690e79e373SShiyang Ruan if (compared < 0) 20700e79e373SShiyang Ruan return ret; 20710e79e373SShiyang Ruan src_iter.processed = dst_iter.processed = compared; 20726f7db389SShiyang Ruan } 20736f7db389SShiyang Ruan return ret; 20746f7db389SShiyang Ruan } 20756f7db389SShiyang Ruan 20766f7db389SShiyang Ruan int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in, 20776f7db389SShiyang Ruan struct file *file_out, loff_t pos_out, 20786f7db389SShiyang Ruan loff_t *len, unsigned int remap_flags, 20796f7db389SShiyang Ruan const struct iomap_ops *ops) 20806f7db389SShiyang Ruan { 20816f7db389SShiyang Ruan return __generic_remap_file_range_prep(file_in, pos_in, file_out, 20826f7db389SShiyang Ruan pos_out, len, remap_flags, ops); 20836f7db389SShiyang Ruan } 20846f7db389SShiyang Ruan EXPORT_SYMBOL_GPL(dax_remap_file_range_prep); 2085
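/*
 * Illustrative sketch, not part of fs/dax.c: a minimal read_iter/write_iter
 * pair built on dax_iomap_rw(), assuming a filesystem that already supplies
 * an iomap_ops instance (the name "myfs_iomap_ops" below is hypothetical).
 * As the kernel-doc for dax_iomap_rw() notes, the caller provides read/write
 * exclusion; dax_iomap_rw() asserts that i_rwsem is held (shared for reads,
 * exclusive for writes), which is what the inode_lock*() calls give it.
 * Guarded by #if 0 because it is an example, not code belonging to this file.
 */
#if 0
static ssize_t myfs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	/* Shared i_rwsem: excludes writers while the DAX read runs. */
	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &myfs_iomap_ops);
	inode_unlock_shared(inode);
	return ret;
}

static ssize_t myfs_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	/* Exclusive i_rwsem for the write, as dax_iomap_rw() expects. */
	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = dax_iomap_rw(iocb, from, &myfs_iomap_ops);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif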
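/*
 * Illustrative sketch, not part of fs/dax.c: how a filesystem's ->huge_fault
 * handler might drive dax_iomap_fault() and, for MAP_SYNC mappings,
 * dax_finish_sync_fault().  "myfs_iomap_ops" is again hypothetical, and the
 * locking is reduced to what the comments above require: the caller
 * serialises the fault against truncate / hole punch before calling in.
 * Guarded by #if 0 for the same reason as the sketch above.
 */
#if 0
static vm_fault_t myfs_dax_huge_fault(struct vm_fault *vmf,
				      enum page_entry_size pe_size)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	vm_fault_t ret;
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	/* Serialise against truncate/hole punch, as dax_iomap_fault() expects. */
	filemap_invalidate_lock_shared(inode->i_mapping);
	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &myfs_iomap_ops);
	filemap_invalidate_unlock_shared(inode->i_mapping);

	/*
	 * For synchronous faults dax_iomap_fault() returns VM_FAULT_NEEDDSYNC
	 * instead of installing the PTE/PMD.  The entry may only be inserted
	 * once the allocation is persistent, so fsync the range and then
	 * install the pfn returned above.
	 */
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);

	if (write)
		sb_end_pagefault(inode->i_sb);
	return ret;
}
#endif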