// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}
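/*
 * For illustration only (the values depend on the architecture's page table
 * layout): with 4K pages on x86-64, PAGE_SHIFT = 12, PMD_SHIFT = 21 and
 * PUD_SHIFT = 30, so pe_order() maps PE_SIZE_PTE to order 0, PE_SIZE_PMD to
 * order 9 and PE_SIZE_PUD to order 18.
 */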
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset.  */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}
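/*
 * Illustration of the encoding above (the pfn value is only an example):
 * dax_make_entry(pfn_to_pfn_t(0x1234), DAX_PMD | DAX_ZERO_PAGE) stores
 * xa_mk_value((0x1234 << DAX_SHIFT) | DAX_PMD | DAX_ZERO_PAGE), i.e. the pfn
 * in the high bits and the four special bits in the low bits; dax_to_pfn()
 * recovers 0x1234 and dax_is_pmd_entry()/dax_is_zero_entry() test the flags.
 */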
/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}
/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}
/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
	/* If we were the only waiter woken, wake the next one */
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, false);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, false);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
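/*
 * Rough sketch of how the helpers above are typically combined (see
 * grab_mapping_entry() and dax_writeback_one() below for real callers):
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas, order);
 *	... examine entry, bail out with put_unlocked_entry() if unsuitable ...
 *	dax_lock_entry(&xas, entry);
 *	xas_unlock_irq(&xas);
 *	... work on the locked entry; faults on it will wait ...
 *	dax_unlock_entry(&xas, entry);
 */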
static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, true);
		mapping->nrexceptional--;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrexceptional++;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_entry(&xas, entry);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide range to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &range,
				   &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_invalidate(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count;
	long ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);

	dax_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, false);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

put_unlocked:
	put_unlocked_entry(xas, entry);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}
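/*
 * Worked example of the sector arithmetic above, assuming 4K pages and
 * 512-byte sectors (the numbers are illustrative only): with iomap->offset
 * = 0 and iomap->addr = 8192, a file position of 12345 is first truncated
 * to its page boundary (12288), giving sector (8192 + 12288 - 0) >> 9 = 40.
 */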
static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate a block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
1114e3fce68cSJan Kara */ 1115cd656375SJan Kara if (iomap->flags & IOMAP_F_NEW) { 1116e3fce68cSJan Kara invalidate_inode_pages2_range(inode->i_mapping, 1117e3fce68cSJan Kara pos >> PAGE_SHIFT, 1118e3fce68cSJan Kara (end - 1) >> PAGE_SHIFT); 1119e3fce68cSJan Kara } 1120e3fce68cSJan Kara 1121cccbce67SDan Williams id = dax_read_lock(); 1122a254e568SChristoph Hellwig while (pos < end) { 1123a254e568SChristoph Hellwig unsigned offset = pos & (PAGE_SIZE - 1); 1124cccbce67SDan Williams const size_t size = ALIGN(length + offset, PAGE_SIZE); 1125cccbce67SDan Williams const sector_t sector = dax_iomap_sector(iomap, pos); 1126a254e568SChristoph Hellwig ssize_t map_len; 1127cccbce67SDan Williams pgoff_t pgoff; 1128cccbce67SDan Williams void *kaddr; 1129a254e568SChristoph Hellwig 1130d1908f52SMichal Hocko if (fatal_signal_pending(current)) { 1131d1908f52SMichal Hocko ret = -EINTR; 1132d1908f52SMichal Hocko break; 1133d1908f52SMichal Hocko } 1134d1908f52SMichal Hocko 1135cccbce67SDan Williams ret = bdev_dax_pgoff(bdev, sector, size, &pgoff); 1136cccbce67SDan Williams if (ret) 1137cccbce67SDan Williams break; 1138cccbce67SDan Williams 1139cccbce67SDan Williams map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), 114086ed913bSHuaisheng Ye &kaddr, NULL); 1141a254e568SChristoph Hellwig if (map_len < 0) { 1142a254e568SChristoph Hellwig ret = map_len; 1143a254e568SChristoph Hellwig break; 1144a254e568SChristoph Hellwig } 1145a254e568SChristoph Hellwig 1146cccbce67SDan Williams map_len = PFN_PHYS(map_len); 1147cccbce67SDan Williams kaddr += offset; 1148a254e568SChristoph Hellwig map_len -= offset; 1149a254e568SChristoph Hellwig if (map_len > end - pos) 1150a254e568SChristoph Hellwig map_len = end - pos; 1151a254e568SChristoph Hellwig 1152a2e050f5SRoss Zwisler /* 1153a2e050f5SRoss Zwisler * The userspace address for the memory copy has already been 1154a2e050f5SRoss Zwisler * validated via access_ok() in either vfs_read() or 1155a2e050f5SRoss Zwisler * vfs_write(), depending on which operation we are doing. 1156a2e050f5SRoss Zwisler */ 1157a254e568SChristoph Hellwig if (iov_iter_rw(iter) == WRITE) 1158a77d4786SDan Williams xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, 1159fec53774SDan Williams map_len, iter); 1160a254e568SChristoph Hellwig else 1161a77d4786SDan Williams xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr, 1162b3a9a0c3SDan Williams map_len, iter); 1163a254e568SChristoph Hellwig 1164a77d4786SDan Williams pos += xfer; 1165a77d4786SDan Williams length -= xfer; 1166a77d4786SDan Williams done += xfer; 1167a77d4786SDan Williams 1168a77d4786SDan Williams if (xfer == 0) 1169a77d4786SDan Williams ret = -EFAULT; 1170a77d4786SDan Williams if (xfer < map_len) 1171a77d4786SDan Williams break; 1172a254e568SChristoph Hellwig } 1173cccbce67SDan Williams dax_read_unlock(id); 1174a254e568SChristoph Hellwig 1175a254e568SChristoph Hellwig return done ? done : ret; 1176a254e568SChristoph Hellwig } 1177a254e568SChristoph Hellwig 1178a254e568SChristoph Hellwig /** 117911c59c92SRoss Zwisler * dax_iomap_rw - Perform I/O to a DAX file 1180a254e568SChristoph Hellwig * @iocb: The control block for this I/O 1181a254e568SChristoph Hellwig * @iter: The addresses to do I/O from or to 1182a254e568SChristoph Hellwig * @ops: iomap ops passed from the file system 1183a254e568SChristoph Hellwig * 1184a254e568SChristoph Hellwig * This function performs read and write operations to directly mapped 1185a254e568SChristoph Hellwig * persistent memory. 
The caller needs to take care of read/write exclusion
1186a254e568SChristoph Hellwig * and evicting any page cache pages in the region under I/O.
1187a254e568SChristoph Hellwig */
1188a254e568SChristoph Hellwig ssize_t
118911c59c92SRoss Zwisler dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
11908ff6daa1SChristoph Hellwig const struct iomap_ops *ops)
1191a254e568SChristoph Hellwig {
1192a254e568SChristoph Hellwig struct address_space *mapping = iocb->ki_filp->f_mapping;
1193a254e568SChristoph Hellwig struct inode *inode = mapping->host;
1194a254e568SChristoph Hellwig loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1195a254e568SChristoph Hellwig unsigned flags = 0;
1196a254e568SChristoph Hellwig 
1197168316dbSChristoph Hellwig if (iov_iter_rw(iter) == WRITE) {
11989ffbe8acSNikolay Borisov lockdep_assert_held_write(&inode->i_rwsem);
1199a254e568SChristoph Hellwig flags |= IOMAP_WRITE;
1200168316dbSChristoph Hellwig } else {
1201168316dbSChristoph Hellwig lockdep_assert_held(&inode->i_rwsem);
1202168316dbSChristoph Hellwig }
1203a254e568SChristoph Hellwig 
1204a254e568SChristoph Hellwig while (iov_iter_count(iter)) {
1205a254e568SChristoph Hellwig ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
120611c59c92SRoss Zwisler iter, dax_iomap_actor);
1207a254e568SChristoph Hellwig if (ret <= 0)
1208a254e568SChristoph Hellwig break;
1209a254e568SChristoph Hellwig pos += ret;
1210a254e568SChristoph Hellwig done += ret;
1211a254e568SChristoph Hellwig }
1212a254e568SChristoph Hellwig 
1213a254e568SChristoph Hellwig iocb->ki_pos += done;
1214a254e568SChristoph Hellwig return done ? done : ret;
1215a254e568SChristoph Hellwig }
121611c59c92SRoss Zwisler EXPORT_SYMBOL_GPL(dax_iomap_rw);
1217a7d73fe6SChristoph Hellwig 
1218ab77dab4SSouptick Joarder static vm_fault_t dax_fault_return(int error)
12199f141d6eSJan Kara {
12209f141d6eSJan Kara if (error == 0)
12219f141d6eSJan Kara return VM_FAULT_NOPAGE;
1222c9aed74eSSouptick Joarder return vmf_error(error);
12239f141d6eSJan Kara }
12249f141d6eSJan Kara 
1225aaa422c4SDan Williams /*
1226aaa422c4SDan Williams * MAP_SYNC on a dax mapping guarantees dirty metadata is
1227aaa422c4SDan Williams * flushed on write-faults (non-cow), but not read-faults.
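 *
 * A minimal sketch of the userspace side this check serves (assumed
 * usage, not code from this file); MAP_SYNC must be combined with
 * MAP_SHARED_VALIDATE so the kernel can refuse it when unsupported:
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *	p[0] = 1;	- write fault; the metadata needed to reach the
 *			  block is durable before the fault returns
 *
 * The data itself is flushed from userspace (e.g. pmem_persist() in
 * libpmem); the check below is what makes such a write fault synchronous.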
1228aaa422c4SDan Williams */ 1229aaa422c4SDan Williams static bool dax_fault_is_synchronous(unsigned long flags, 1230aaa422c4SDan Williams struct vm_area_struct *vma, struct iomap *iomap) 1231aaa422c4SDan Williams { 1232aaa422c4SDan Williams return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) 1233aaa422c4SDan Williams && (iomap->flags & IOMAP_F_DIRTY); 1234aaa422c4SDan Williams } 1235aaa422c4SDan Williams 1236ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, 1237c0b24625SJan Kara int *iomap_errp, const struct iomap_ops *ops) 1238a7d73fe6SChristoph Hellwig { 1239a0987ad5SJan Kara struct vm_area_struct *vma = vmf->vma; 1240a0987ad5SJan Kara struct address_space *mapping = vma->vm_file->f_mapping; 1241b15cd800SMatthew Wilcox XA_STATE(xas, &mapping->i_pages, vmf->pgoff); 1242a7d73fe6SChristoph Hellwig struct inode *inode = mapping->host; 12431a29d85eSJan Kara unsigned long vaddr = vmf->address; 1244a7d73fe6SChristoph Hellwig loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; 1245c039b997SGoldwyn Rodrigues struct iomap iomap = { .type = IOMAP_HOLE }; 1246c039b997SGoldwyn Rodrigues struct iomap srcmap = { .type = IOMAP_HOLE }; 12479484ab1bSJan Kara unsigned flags = IOMAP_FAULT; 1248a7d73fe6SChristoph Hellwig int error, major = 0; 1249d2c43ef1SJan Kara bool write = vmf->flags & FAULT_FLAG_WRITE; 1250caa51d26SJan Kara bool sync; 1251ab77dab4SSouptick Joarder vm_fault_t ret = 0; 1252a7d73fe6SChristoph Hellwig void *entry; 12531b5a1cb2SJan Kara pfn_t pfn; 1254a7d73fe6SChristoph Hellwig 1255ab77dab4SSouptick Joarder trace_dax_pte_fault(inode, vmf, ret); 1256a7d73fe6SChristoph Hellwig /* 1257a7d73fe6SChristoph Hellwig * Check whether offset isn't beyond end of file now. Caller is supposed 1258a7d73fe6SChristoph Hellwig * to hold locks serializing us with truncate / punch hole so this is 1259a7d73fe6SChristoph Hellwig * a reliable test. 1260a7d73fe6SChristoph Hellwig */ 1261a9c42b33SRoss Zwisler if (pos >= i_size_read(inode)) { 1262ab77dab4SSouptick Joarder ret = VM_FAULT_SIGBUS; 1263a9c42b33SRoss Zwisler goto out; 1264a9c42b33SRoss Zwisler } 1265a7d73fe6SChristoph Hellwig 1266d2c43ef1SJan Kara if (write && !vmf->cow_page) 1267a7d73fe6SChristoph Hellwig flags |= IOMAP_WRITE; 1268a7d73fe6SChristoph Hellwig 1269b15cd800SMatthew Wilcox entry = grab_mapping_entry(&xas, mapping, 0); 1270b15cd800SMatthew Wilcox if (xa_is_internal(entry)) { 1271b15cd800SMatthew Wilcox ret = xa_to_internal(entry); 127213e451fdSJan Kara goto out; 127313e451fdSJan Kara } 127413e451fdSJan Kara 1275a7d73fe6SChristoph Hellwig /* 1276e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1277e2093926SRoss Zwisler * mappings, that we have raced with a PMD fault that overlaps with 1278e2093926SRoss Zwisler * the PTE we need to set up. If so just return and the fault will be 1279e2093926SRoss Zwisler * retried. 1280e2093926SRoss Zwisler */ 1281e2093926SRoss Zwisler if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { 1282ab77dab4SSouptick Joarder ret = VM_FAULT_NOPAGE; 1283e2093926SRoss Zwisler goto unlock_entry; 1284e2093926SRoss Zwisler } 1285e2093926SRoss Zwisler 1286e2093926SRoss Zwisler /* 1287a7d73fe6SChristoph Hellwig * Note that we don't bother to use iomap_apply here: DAX required 1288a7d73fe6SChristoph Hellwig * the file system block size to be equal the page size, which means 1289a7d73fe6SChristoph Hellwig * that we never have to deal with more than a single extent here. 
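 *
 * Concretely, the invariant relied upon here is (a sketch; the
 * WARN_ON_ONCE below checks the tail of it, the head is the usual
 * ->iomap_begin contract of returning a mapping that contains pos):
 *
 *	iomap.offset <= pos && iomap.offset + iomap.length >= pos + PAGE_SIZE
 *
 * i.e. a single mapping always covers the whole faulting page.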
1290a7d73fe6SChristoph Hellwig */ 1291c039b997SGoldwyn Rodrigues error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap); 1292c0b24625SJan Kara if (iomap_errp) 1293c0b24625SJan Kara *iomap_errp = error; 1294a9c42b33SRoss Zwisler if (error) { 1295ab77dab4SSouptick Joarder ret = dax_fault_return(error); 129613e451fdSJan Kara goto unlock_entry; 1297a9c42b33SRoss Zwisler } 1298a7d73fe6SChristoph Hellwig if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { 129913e451fdSJan Kara error = -EIO; /* fs corruption? */ 130013e451fdSJan Kara goto error_finish_iomap; 1301a7d73fe6SChristoph Hellwig } 1302a7d73fe6SChristoph Hellwig 1303a7d73fe6SChristoph Hellwig if (vmf->cow_page) { 130431a6f1a6SJan Kara sector_t sector = dax_iomap_sector(&iomap, pos); 130531a6f1a6SJan Kara 1306a7d73fe6SChristoph Hellwig switch (iomap.type) { 1307a7d73fe6SChristoph Hellwig case IOMAP_HOLE: 1308a7d73fe6SChristoph Hellwig case IOMAP_UNWRITTEN: 1309a7d73fe6SChristoph Hellwig clear_user_highpage(vmf->cow_page, vaddr); 1310a7d73fe6SChristoph Hellwig break; 1311a7d73fe6SChristoph Hellwig case IOMAP_MAPPED: 1312cccbce67SDan Williams error = copy_user_dax(iomap.bdev, iomap.dax_dev, 1313cccbce67SDan Williams sector, PAGE_SIZE, vmf->cow_page, vaddr); 1314a7d73fe6SChristoph Hellwig break; 1315a7d73fe6SChristoph Hellwig default: 1316a7d73fe6SChristoph Hellwig WARN_ON_ONCE(1); 1317a7d73fe6SChristoph Hellwig error = -EIO; 1318a7d73fe6SChristoph Hellwig break; 1319a7d73fe6SChristoph Hellwig } 1320a7d73fe6SChristoph Hellwig 1321a7d73fe6SChristoph Hellwig if (error) 132213e451fdSJan Kara goto error_finish_iomap; 1323b1aa812bSJan Kara 1324b1aa812bSJan Kara __SetPageUptodate(vmf->cow_page); 1325ab77dab4SSouptick Joarder ret = finish_fault(vmf); 1326ab77dab4SSouptick Joarder if (!ret) 1327ab77dab4SSouptick Joarder ret = VM_FAULT_DONE_COW; 132813e451fdSJan Kara goto finish_iomap; 1329a7d73fe6SChristoph Hellwig } 1330a7d73fe6SChristoph Hellwig 1331aaa422c4SDan Williams sync = dax_fault_is_synchronous(flags, vma, &iomap); 1332caa51d26SJan Kara 1333a7d73fe6SChristoph Hellwig switch (iomap.type) { 1334a7d73fe6SChristoph Hellwig case IOMAP_MAPPED: 1335a7d73fe6SChristoph Hellwig if (iomap.flags & IOMAP_F_NEW) { 1336a7d73fe6SChristoph Hellwig count_vm_event(PGMAJFAULT); 1337a0987ad5SJan Kara count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); 1338a7d73fe6SChristoph Hellwig major = VM_FAULT_MAJOR; 1339a7d73fe6SChristoph Hellwig } 13401b5a1cb2SJan Kara error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn); 13411b5a1cb2SJan Kara if (error < 0) 13421b5a1cb2SJan Kara goto error_finish_iomap; 13431b5a1cb2SJan Kara 1344b15cd800SMatthew Wilcox entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, 1345caa51d26SJan Kara 0, write && !sync); 13461b5a1cb2SJan Kara 1347caa51d26SJan Kara /* 1348caa51d26SJan Kara * If we are doing synchronous page fault and inode needs fsync, 1349caa51d26SJan Kara * we can insert PTE into page tables only after that happens. 1350caa51d26SJan Kara * Skip insertion for now and return the pfn so that caller can 1351caa51d26SJan Kara * insert it after fsync is done. 
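 *
 * The caller side then looks roughly like this (hypothetical filesystem
 * fault handler, myfs_iomap_ops is an illustrative name; see also the
 * fuller sketch near dax_iomap_fault() below):
 *
 *	ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &pfn, NULL, &myfs_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, PE_SIZE_PTE, pfn);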
1352caa51d26SJan Kara */ 1353caa51d26SJan Kara if (sync) { 1354caa51d26SJan Kara if (WARN_ON_ONCE(!pfnp)) { 1355caa51d26SJan Kara error = -EIO; 1356caa51d26SJan Kara goto error_finish_iomap; 1357caa51d26SJan Kara } 1358caa51d26SJan Kara *pfnp = pfn; 1359ab77dab4SSouptick Joarder ret = VM_FAULT_NEEDDSYNC | major; 1360caa51d26SJan Kara goto finish_iomap; 1361caa51d26SJan Kara } 13621b5a1cb2SJan Kara trace_dax_insert_mapping(inode, vmf, entry); 13631b5a1cb2SJan Kara if (write) 1364ab77dab4SSouptick Joarder ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn); 13651b5a1cb2SJan Kara else 1366ab77dab4SSouptick Joarder ret = vmf_insert_mixed(vma, vaddr, pfn); 13671b5a1cb2SJan Kara 1368ab77dab4SSouptick Joarder goto finish_iomap; 1369a7d73fe6SChristoph Hellwig case IOMAP_UNWRITTEN: 1370a7d73fe6SChristoph Hellwig case IOMAP_HOLE: 1371d2c43ef1SJan Kara if (!write) { 1372b15cd800SMatthew Wilcox ret = dax_load_hole(&xas, mapping, &entry, vmf); 137313e451fdSJan Kara goto finish_iomap; 13741550290bSRoss Zwisler } 1375a7d73fe6SChristoph Hellwig /*FALLTHRU*/ 1376a7d73fe6SChristoph Hellwig default: 1377a7d73fe6SChristoph Hellwig WARN_ON_ONCE(1); 1378a7d73fe6SChristoph Hellwig error = -EIO; 1379a7d73fe6SChristoph Hellwig break; 1380a7d73fe6SChristoph Hellwig } 1381a7d73fe6SChristoph Hellwig 138213e451fdSJan Kara error_finish_iomap: 1383ab77dab4SSouptick Joarder ret = dax_fault_return(error); 13849f141d6eSJan Kara finish_iomap: 13859f141d6eSJan Kara if (ops->iomap_end) { 13869f141d6eSJan Kara int copied = PAGE_SIZE; 13879f141d6eSJan Kara 1388ab77dab4SSouptick Joarder if (ret & VM_FAULT_ERROR) 13899f141d6eSJan Kara copied = 0; 13909f141d6eSJan Kara /* 13919f141d6eSJan Kara * The fault is done by now and there's no way back (other 13929f141d6eSJan Kara * thread may be already happily using PTE we have installed). 13939f141d6eSJan Kara * Just ignore error from ->iomap_end since we cannot do much 13949f141d6eSJan Kara * with it. 
13959f141d6eSJan Kara */ 13969f141d6eSJan Kara ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap); 13971550290bSRoss Zwisler } 139813e451fdSJan Kara unlock_entry: 1399b15cd800SMatthew Wilcox dax_unlock_entry(&xas, entry); 1400a9c42b33SRoss Zwisler out: 1401ab77dab4SSouptick Joarder trace_dax_pte_fault_done(inode, vmf, ret); 1402ab77dab4SSouptick Joarder return ret | major; 1403a7d73fe6SChristoph Hellwig } 1404642261acSRoss Zwisler 1405642261acSRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 1406b15cd800SMatthew Wilcox static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, 1407b15cd800SMatthew Wilcox struct iomap *iomap, void **entry) 1408642261acSRoss Zwisler { 1409f4200391SDave Jiang struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1410f4200391SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 141111cf9d86SAneesh Kumar K.V struct vm_area_struct *vma = vmf->vma; 1412653b2ea3SRoss Zwisler struct inode *inode = mapping->host; 141311cf9d86SAneesh Kumar K.V pgtable_t pgtable = NULL; 1414642261acSRoss Zwisler struct page *zero_page; 1415642261acSRoss Zwisler spinlock_t *ptl; 1416642261acSRoss Zwisler pmd_t pmd_entry; 14173fe0791cSDan Williams pfn_t pfn; 1418642261acSRoss Zwisler 1419f4200391SDave Jiang zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); 1420642261acSRoss Zwisler 1421642261acSRoss Zwisler if (unlikely(!zero_page)) 1422653b2ea3SRoss Zwisler goto fallback; 1423642261acSRoss Zwisler 14243fe0791cSDan Williams pfn = page_to_pfn_t(zero_page); 1425b15cd800SMatthew Wilcox *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, 14263159f943SMatthew Wilcox DAX_PMD | DAX_ZERO_PAGE, false); 1427642261acSRoss Zwisler 142811cf9d86SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) { 142911cf9d86SAneesh Kumar K.V pgtable = pte_alloc_one(vma->vm_mm); 143011cf9d86SAneesh Kumar K.V if (!pgtable) 143111cf9d86SAneesh Kumar K.V return VM_FAULT_OOM; 143211cf9d86SAneesh Kumar K.V } 143311cf9d86SAneesh Kumar K.V 1434f4200391SDave Jiang ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1435f4200391SDave Jiang if (!pmd_none(*(vmf->pmd))) { 1436642261acSRoss Zwisler spin_unlock(ptl); 1437653b2ea3SRoss Zwisler goto fallback; 1438642261acSRoss Zwisler } 1439642261acSRoss Zwisler 144011cf9d86SAneesh Kumar K.V if (pgtable) { 144111cf9d86SAneesh Kumar K.V pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); 144211cf9d86SAneesh Kumar K.V mm_inc_nr_ptes(vma->vm_mm); 144311cf9d86SAneesh Kumar K.V } 1444f4200391SDave Jiang pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); 1445642261acSRoss Zwisler pmd_entry = pmd_mkhuge(pmd_entry); 1446f4200391SDave Jiang set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); 1447642261acSRoss Zwisler spin_unlock(ptl); 1448b15cd800SMatthew Wilcox trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); 1449642261acSRoss Zwisler return VM_FAULT_NOPAGE; 1450653b2ea3SRoss Zwisler 1451653b2ea3SRoss Zwisler fallback: 145211cf9d86SAneesh Kumar K.V if (pgtable) 145311cf9d86SAneesh Kumar K.V pte_free(vma->vm_mm, pgtable); 1454b15cd800SMatthew Wilcox trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); 1455642261acSRoss Zwisler return VM_FAULT_FALLBACK; 1456642261acSRoss Zwisler } 1457642261acSRoss Zwisler 1458ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 1459a2d58167SDave Jiang const struct iomap_ops *ops) 1460642261acSRoss Zwisler { 1461f4200391SDave Jiang struct vm_area_struct *vma = vmf->vma; 1462642261acSRoss Zwisler struct address_space *mapping = 
vma->vm_file->f_mapping; 1463b15cd800SMatthew Wilcox XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); 1464d8a849e1SDave Jiang unsigned long pmd_addr = vmf->address & PMD_MASK; 1465d8a849e1SDave Jiang bool write = vmf->flags & FAULT_FLAG_WRITE; 1466caa51d26SJan Kara bool sync; 14679484ab1bSJan Kara unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT; 1468642261acSRoss Zwisler struct inode *inode = mapping->host; 1469ab77dab4SSouptick Joarder vm_fault_t result = VM_FAULT_FALLBACK; 1470c039b997SGoldwyn Rodrigues struct iomap iomap = { .type = IOMAP_HOLE }; 1471c039b997SGoldwyn Rodrigues struct iomap srcmap = { .type = IOMAP_HOLE }; 1472b15cd800SMatthew Wilcox pgoff_t max_pgoff; 1473642261acSRoss Zwisler void *entry; 1474642261acSRoss Zwisler loff_t pos; 1475642261acSRoss Zwisler int error; 1476302a5e31SJan Kara pfn_t pfn; 1477642261acSRoss Zwisler 1478282a8e03SRoss Zwisler /* 1479282a8e03SRoss Zwisler * Check whether offset isn't beyond end of file now. Caller is 1480282a8e03SRoss Zwisler * supposed to hold locks serializing us with truncate / punch hole so 1481282a8e03SRoss Zwisler * this is a reliable test. 1482282a8e03SRoss Zwisler */ 1483957ac8c4SJeff Moyer max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 1484282a8e03SRoss Zwisler 1485f4200391SDave Jiang trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); 1486282a8e03SRoss Zwisler 1487fffa281bSRoss Zwisler /* 1488fffa281bSRoss Zwisler * Make sure that the faulting address's PMD offset (color) matches 1489fffa281bSRoss Zwisler * the PMD offset from the start of the file. This is necessary so 1490fffa281bSRoss Zwisler * that a PMD range in the page table overlaps exactly with a PMD 1491a77d19f4SMatthew Wilcox * range in the page cache. 1492fffa281bSRoss Zwisler */ 1493fffa281bSRoss Zwisler if ((vmf->pgoff & PG_PMD_COLOUR) != 1494fffa281bSRoss Zwisler ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) 1495fffa281bSRoss Zwisler goto fallback; 1496fffa281bSRoss Zwisler 1497642261acSRoss Zwisler /* Fall back to PTEs if we're going to COW */ 1498642261acSRoss Zwisler if (write && !(vma->vm_flags & VM_SHARED)) 1499642261acSRoss Zwisler goto fallback; 1500642261acSRoss Zwisler 1501642261acSRoss Zwisler /* If the PMD would extend outside the VMA */ 1502642261acSRoss Zwisler if (pmd_addr < vma->vm_start) 1503642261acSRoss Zwisler goto fallback; 1504642261acSRoss Zwisler if ((pmd_addr + PMD_SIZE) > vma->vm_end) 1505642261acSRoss Zwisler goto fallback; 1506642261acSRoss Zwisler 1507b15cd800SMatthew Wilcox if (xas.xa_index >= max_pgoff) { 1508282a8e03SRoss Zwisler result = VM_FAULT_SIGBUS; 1509282a8e03SRoss Zwisler goto out; 1510282a8e03SRoss Zwisler } 1511642261acSRoss Zwisler 1512642261acSRoss Zwisler /* If the PMD would extend beyond the file size */ 1513b15cd800SMatthew Wilcox if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff) 1514642261acSRoss Zwisler goto fallback; 1515642261acSRoss Zwisler 1516642261acSRoss Zwisler /* 1517b15cd800SMatthew Wilcox * grab_mapping_entry() will make sure we get an empty PMD entry, 1518b15cd800SMatthew Wilcox * a zero PMD entry or a DAX PMD. If it can't (because a PTE 1519b15cd800SMatthew Wilcox * entry is already in the array, for instance), it will return 1520b15cd800SMatthew Wilcox * VM_FAULT_FALLBACK. 
15219f141d6eSJan Kara */ 152223c84eb7SMatthew Wilcox (Oracle) entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); 1523b15cd800SMatthew Wilcox if (xa_is_internal(entry)) { 1524b15cd800SMatthew Wilcox result = xa_to_internal(entry); 1525876f2946SRoss Zwisler goto fallback; 1526b15cd800SMatthew Wilcox } 1527876f2946SRoss Zwisler 1528876f2946SRoss Zwisler /* 1529e2093926SRoss Zwisler * It is possible, particularly with mixed reads & writes to private 1530e2093926SRoss Zwisler * mappings, that we have raced with a PTE fault that overlaps with 1531e2093926SRoss Zwisler * the PMD we need to set up. If so just return and the fault will be 1532e2093926SRoss Zwisler * retried. 1533e2093926SRoss Zwisler */ 1534e2093926SRoss Zwisler if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && 1535e2093926SRoss Zwisler !pmd_devmap(*vmf->pmd)) { 1536e2093926SRoss Zwisler result = 0; 1537e2093926SRoss Zwisler goto unlock_entry; 1538e2093926SRoss Zwisler } 1539e2093926SRoss Zwisler 1540e2093926SRoss Zwisler /* 1541876f2946SRoss Zwisler * Note that we don't use iomap_apply here. We aren't doing I/O, only 1542876f2946SRoss Zwisler * setting up a mapping, so really we're using iomap_begin() as a way 1543876f2946SRoss Zwisler * to look up our filesystem block. 1544876f2946SRoss Zwisler */ 1545b15cd800SMatthew Wilcox pos = (loff_t)xas.xa_index << PAGE_SHIFT; 1546c039b997SGoldwyn Rodrigues error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap, 1547c039b997SGoldwyn Rodrigues &srcmap); 1548876f2946SRoss Zwisler if (error) 1549876f2946SRoss Zwisler goto unlock_entry; 1550876f2946SRoss Zwisler 1551876f2946SRoss Zwisler if (iomap.offset + iomap.length < pos + PMD_SIZE) 15529f141d6eSJan Kara goto finish_iomap; 15539f141d6eSJan Kara 1554aaa422c4SDan Williams sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap); 1555caa51d26SJan Kara 1556642261acSRoss Zwisler switch (iomap.type) { 1557642261acSRoss Zwisler case IOMAP_MAPPED: 1558302a5e31SJan Kara error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn); 1559302a5e31SJan Kara if (error < 0) 1560302a5e31SJan Kara goto finish_iomap; 1561302a5e31SJan Kara 1562b15cd800SMatthew Wilcox entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, 15633159f943SMatthew Wilcox DAX_PMD, write && !sync); 1564302a5e31SJan Kara 1565caa51d26SJan Kara /* 1566caa51d26SJan Kara * If we are doing synchronous page fault and inode needs fsync, 1567caa51d26SJan Kara * we can insert PMD into page tables only after that happens. 1568caa51d26SJan Kara * Skip insertion for now and return the pfn so that caller can 1569caa51d26SJan Kara * insert it after fsync is done. 
1570caa51d26SJan Kara */ 1571caa51d26SJan Kara if (sync) { 1572caa51d26SJan Kara if (WARN_ON_ONCE(!pfnp)) 1573caa51d26SJan Kara goto finish_iomap; 1574caa51d26SJan Kara *pfnp = pfn; 1575caa51d26SJan Kara result = VM_FAULT_NEEDDSYNC; 1576caa51d26SJan Kara goto finish_iomap; 1577caa51d26SJan Kara } 1578caa51d26SJan Kara 1579302a5e31SJan Kara trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); 1580fce86ff5SDan Williams result = vmf_insert_pfn_pmd(vmf, pfn, write); 1581642261acSRoss Zwisler break; 1582642261acSRoss Zwisler case IOMAP_UNWRITTEN: 1583642261acSRoss Zwisler case IOMAP_HOLE: 1584642261acSRoss Zwisler if (WARN_ON_ONCE(write)) 1585876f2946SRoss Zwisler break; 1586b15cd800SMatthew Wilcox result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry); 1587642261acSRoss Zwisler break; 1588642261acSRoss Zwisler default: 1589642261acSRoss Zwisler WARN_ON_ONCE(1); 1590642261acSRoss Zwisler break; 1591642261acSRoss Zwisler } 1592642261acSRoss Zwisler 15939f141d6eSJan Kara finish_iomap: 15949f141d6eSJan Kara if (ops->iomap_end) { 15959f141d6eSJan Kara int copied = PMD_SIZE; 15969f141d6eSJan Kara 15979f141d6eSJan Kara if (result == VM_FAULT_FALLBACK) 15989f141d6eSJan Kara copied = 0; 15999f141d6eSJan Kara /* 16009f141d6eSJan Kara * The fault is done by now and there's no way back (other 16019f141d6eSJan Kara * thread may be already happily using PMD we have installed). 16029f141d6eSJan Kara * Just ignore error from ->iomap_end since we cannot do much 16039f141d6eSJan Kara * with it. 16049f141d6eSJan Kara */ 16059f141d6eSJan Kara ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags, 16069f141d6eSJan Kara &iomap); 16079f141d6eSJan Kara } 1608876f2946SRoss Zwisler unlock_entry: 1609b15cd800SMatthew Wilcox dax_unlock_entry(&xas, entry); 1610642261acSRoss Zwisler fallback: 1611642261acSRoss Zwisler if (result == VM_FAULT_FALLBACK) { 1612d8a849e1SDave Jiang split_huge_pmd(vma, vmf->pmd, vmf->address); 1613642261acSRoss Zwisler count_vm_event(THP_FAULT_FALLBACK); 1614642261acSRoss Zwisler } 1615282a8e03SRoss Zwisler out: 1616f4200391SDave Jiang trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result); 1617642261acSRoss Zwisler return result; 1618642261acSRoss Zwisler } 1619a2d58167SDave Jiang #else 1620ab77dab4SSouptick Joarder static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 162101cddfe9SArnd Bergmann const struct iomap_ops *ops) 1622a2d58167SDave Jiang { 1623a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1624a2d58167SDave Jiang } 1625642261acSRoss Zwisler #endif /* CONFIG_FS_DAX_PMD */ 1626a2d58167SDave Jiang 1627a2d58167SDave Jiang /** 1628a2d58167SDave Jiang * dax_iomap_fault - handle a page fault on a DAX file 1629a2d58167SDave Jiang * @vmf: The description of the fault 1630cec04e8cSJan Kara * @pe_size: Size of the page to fault in 16319a0dd422SJan Kara * @pfnp: PFN to insert for synchronous faults if fsync is required 1632c0b24625SJan Kara * @iomap_errp: Storage for detailed error code in case of error 1633cec04e8cSJan Kara * @ops: Iomap ops passed from the file system 1634a2d58167SDave Jiang * 1635a2d58167SDave Jiang * When a page fault occurs, filesystems may call this helper in 1636a2d58167SDave Jiang * their fault handler for DAX files. dax_iomap_fault() assumes the caller 1637a2d58167SDave Jiang * has done all the necessary locking for page fault to proceed 1638a2d58167SDave Jiang * successfully. 
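 *
 * A rough sketch of such a handler, loosely modeled on ext4/XFS; the names
 * myfs_huge_fault, myfs_iomap_ops and MYFS_I()->mmap_sem are illustrative
 * assumptions (mmap_sem stands in for whatever lock the filesystem uses to
 * serialize faults against truncate), not part of this API:
 *
 *	static vm_fault_t myfs_huge_fault(struct vm_fault *vmf,
 *					  enum page_entry_size pe_size)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
 *			     (vmf->vma->vm_flags & VM_SHARED);
 *		vm_fault_t ret;
 *		pfn_t pfn;
 *
 *		if (write) {
 *			sb_start_pagefault(inode->i_sb);
 *			file_update_time(vmf->vma->vm_file);
 *		}
 *		down_read(&MYFS_I(inode)->mmap_sem);
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &myfs_iomap_ops);
 *		up_read(&MYFS_I(inode)->mmap_sem);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		if (write)
 *			sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}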
1639a2d58167SDave Jiang */ 1640ab77dab4SSouptick Joarder vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 1641c0b24625SJan Kara pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) 1642a2d58167SDave Jiang { 1643c791ace1SDave Jiang switch (pe_size) { 1644c791ace1SDave Jiang case PE_SIZE_PTE: 1645c0b24625SJan Kara return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); 1646c791ace1SDave Jiang case PE_SIZE_PMD: 16479a0dd422SJan Kara return dax_iomap_pmd_fault(vmf, pfnp, ops); 1648a2d58167SDave Jiang default: 1649a2d58167SDave Jiang return VM_FAULT_FALLBACK; 1650a2d58167SDave Jiang } 1651a2d58167SDave Jiang } 1652a2d58167SDave Jiang EXPORT_SYMBOL_GPL(dax_iomap_fault); 165371eab6dfSJan Kara 1654a77d19f4SMatthew Wilcox /* 165571eab6dfSJan Kara * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables 165671eab6dfSJan Kara * @vmf: The description of the fault 165771eab6dfSJan Kara * @pfn: PFN to insert 1658cfc93c6cSMatthew Wilcox * @order: Order of entry to insert. 165971eab6dfSJan Kara * 1660a77d19f4SMatthew Wilcox * This function inserts a writeable PTE or PMD entry into the page tables 1661a77d19f4SMatthew Wilcox * for an mmaped DAX file. It also marks the page cache entry as dirty. 166271eab6dfSJan Kara */ 1663cfc93c6cSMatthew Wilcox static vm_fault_t 1664cfc93c6cSMatthew Wilcox dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) 166571eab6dfSJan Kara { 166671eab6dfSJan Kara struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1667cfc93c6cSMatthew Wilcox XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); 1668cfc93c6cSMatthew Wilcox void *entry; 1669ab77dab4SSouptick Joarder vm_fault_t ret; 167071eab6dfSJan Kara 1671cfc93c6cSMatthew Wilcox xas_lock_irq(&xas); 167223c84eb7SMatthew Wilcox (Oracle) entry = get_unlocked_entry(&xas, order); 167371eab6dfSJan Kara /* Did we race with someone splitting entry or so? 
*/ 167423c84eb7SMatthew Wilcox (Oracle) if (!entry || dax_is_conflict(entry) || 167523c84eb7SMatthew Wilcox (Oracle) (order == 0 && !dax_is_pte_entry(entry))) { 1676cfc93c6cSMatthew Wilcox put_unlocked_entry(&xas, entry); 1677cfc93c6cSMatthew Wilcox xas_unlock_irq(&xas); 167871eab6dfSJan Kara trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, 167971eab6dfSJan Kara VM_FAULT_NOPAGE); 168071eab6dfSJan Kara return VM_FAULT_NOPAGE; 168171eab6dfSJan Kara } 1682cfc93c6cSMatthew Wilcox xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); 1683cfc93c6cSMatthew Wilcox dax_lock_entry(&xas, entry); 1684cfc93c6cSMatthew Wilcox xas_unlock_irq(&xas); 1685cfc93c6cSMatthew Wilcox if (order == 0) 1686ab77dab4SSouptick Joarder ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 168771eab6dfSJan Kara #ifdef CONFIG_FS_DAX_PMD 1688cfc93c6cSMatthew Wilcox else if (order == PMD_ORDER) 1689fce86ff5SDan Williams ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); 169071eab6dfSJan Kara #endif 1691cfc93c6cSMatthew Wilcox else 1692ab77dab4SSouptick Joarder ret = VM_FAULT_FALLBACK; 1693cfc93c6cSMatthew Wilcox dax_unlock_entry(&xas, entry); 1694ab77dab4SSouptick Joarder trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); 1695ab77dab4SSouptick Joarder return ret; 169671eab6dfSJan Kara } 169771eab6dfSJan Kara 169871eab6dfSJan Kara /** 169971eab6dfSJan Kara * dax_finish_sync_fault - finish synchronous page fault 170071eab6dfSJan Kara * @vmf: The description of the fault 170171eab6dfSJan Kara * @pe_size: Size of entry to be inserted 170271eab6dfSJan Kara * @pfn: PFN to insert 170371eab6dfSJan Kara * 170471eab6dfSJan Kara * This function ensures that the file range touched by the page fault is 170571eab6dfSJan Kara * stored persistently on the media and handles inserting of appropriate page 170671eab6dfSJan Kara * table entry. 170771eab6dfSJan Kara */ 1708ab77dab4SSouptick Joarder vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, 1709ab77dab4SSouptick Joarder enum page_entry_size pe_size, pfn_t pfn) 171071eab6dfSJan Kara { 171171eab6dfSJan Kara int err; 171271eab6dfSJan Kara loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; 1713cfc93c6cSMatthew Wilcox unsigned int order = pe_order(pe_size); 1714cfc93c6cSMatthew Wilcox size_t len = PAGE_SIZE << order; 171571eab6dfSJan Kara 171671eab6dfSJan Kara err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); 171771eab6dfSJan Kara if (err) 171871eab6dfSJan Kara return VM_FAULT_SIGBUS; 1719cfc93c6cSMatthew Wilcox return dax_insert_pfn_mkwrite(vmf, pfn, order); 172071eab6dfSJan Kara } 172171eab6dfSJan Kara EXPORT_SYMBOL_GPL(dax_finish_sync_fault); 1722
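/*
 * A minimal sketch of how a filesystem's ->read_iter might drive
 * dax_iomap_rw() above (myfs_iomap_ops is an assumed name and the locking
 * shown is only what the lockdep assertions in dax_iomap_rw() expect;
 * real filesystems add non-DAX fallbacks and further checks):
 *
 *	static ssize_t myfs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		if (!iov_iter_count(to))
 *			return 0;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &myfs_iomap_ops);
 *		inode_unlock_shared(inode);
 *
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 *
 * The write side is symmetric with inode_lock()/inode_unlock() and, for
 * synchronous writes, a generic_write_sync() call once dax_iomap_rw()
 * returns.
 */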