1d475c634SMatthew Wilcox /* 2d475c634SMatthew Wilcox * fs/dax.c - Direct Access filesystem code 3d475c634SMatthew Wilcox * Copyright (c) 2013-2014 Intel Corporation 4d475c634SMatthew Wilcox * Author: Matthew Wilcox <matthew.r.wilcox@intel.com> 5d475c634SMatthew Wilcox * Author: Ross Zwisler <ross.zwisler@linux.intel.com> 6d475c634SMatthew Wilcox * 7d475c634SMatthew Wilcox * This program is free software; you can redistribute it and/or modify it 8d475c634SMatthew Wilcox * under the terms and conditions of the GNU General Public License, 9d475c634SMatthew Wilcox * version 2, as published by the Free Software Foundation. 10d475c634SMatthew Wilcox * 11d475c634SMatthew Wilcox * This program is distributed in the hope it will be useful, but WITHOUT 12d475c634SMatthew Wilcox * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13d475c634SMatthew Wilcox * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14d475c634SMatthew Wilcox * more details. 15d475c634SMatthew Wilcox */ 16d475c634SMatthew Wilcox 17d475c634SMatthew Wilcox #include <linux/atomic.h> 18d475c634SMatthew Wilcox #include <linux/blkdev.h> 19d475c634SMatthew Wilcox #include <linux/buffer_head.h> 20d77e92e2SRoss Zwisler #include <linux/dax.h> 21d475c634SMatthew Wilcox #include <linux/fs.h> 22d475c634SMatthew Wilcox #include <linux/genhd.h> 23f7ca90b1SMatthew Wilcox #include <linux/highmem.h> 24f7ca90b1SMatthew Wilcox #include <linux/memcontrol.h> 25f7ca90b1SMatthew Wilcox #include <linux/mm.h> 26d475c634SMatthew Wilcox #include <linux/mutex.h> 279973c98eSRoss Zwisler #include <linux/pagevec.h> 28289c6aedSMatthew Wilcox #include <linux/sched.h> 29f361bf4aSIngo Molnar #include <linux/sched/signal.h> 30d475c634SMatthew Wilcox #include <linux/uio.h> 31f7ca90b1SMatthew Wilcox #include <linux/vmstat.h> 3234c0fd54SDan Williams #include <linux/pfn_t.h> 330e749e54SDan Williams #include <linux/sizes.h> 344b4bb46dSJan Kara #include <linux/mmu_notifier.h> 35a254e568SChristoph Hellwig #include <linux/iomap.h> 36a254e568SChristoph Hellwig #include "internal.h" 37d475c634SMatthew Wilcox 38282a8e03SRoss Zwisler #define CREATE_TRACE_POINTS 39282a8e03SRoss Zwisler #include <trace/events/fs_dax.h> 40282a8e03SRoss Zwisler 41cfc93c6cSMatthew Wilcox static inline unsigned int pe_order(enum page_entry_size pe_size) 42cfc93c6cSMatthew Wilcox { 43cfc93c6cSMatthew Wilcox if (pe_size == PE_SIZE_PTE) 44cfc93c6cSMatthew Wilcox return PAGE_SHIFT - PAGE_SHIFT; 45cfc93c6cSMatthew Wilcox if (pe_size == PE_SIZE_PMD) 46cfc93c6cSMatthew Wilcox return PMD_SHIFT - PAGE_SHIFT; 47cfc93c6cSMatthew Wilcox if (pe_size == PE_SIZE_PUD) 48cfc93c6cSMatthew Wilcox return PUD_SHIFT - PAGE_SHIFT; 49cfc93c6cSMatthew Wilcox return ~0; 50cfc93c6cSMatthew Wilcox } 51cfc93c6cSMatthew Wilcox 52ac401cc7SJan Kara /* We choose 4096 entries - same as per-zone page wait tables */ 53ac401cc7SJan Kara #define DAX_WAIT_TABLE_BITS 12 54ac401cc7SJan Kara #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS) 55ac401cc7SJan Kara 56917f3452SRoss Zwisler /* The 'colour' (ie low bits) within a PMD of a page offset. 
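 * For example, on a configuration with 4KiB pages and 2MiB PMDs,
 * PG_PMD_NR is 512 and PG_PMD_COLOUR is 511 (0x1ff), so
 * (index & ~PG_PMD_COLOUR) is the index of the first page covered by
 * the PMD containing 'index'.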
*/ 57917f3452SRoss Zwisler #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) 58977fbdcdSMatthew Wilcox #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT) 59917f3452SRoss Zwisler 60cfc93c6cSMatthew Wilcox /* The order of a PMD entry */ 61cfc93c6cSMatthew Wilcox #define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT) 62cfc93c6cSMatthew Wilcox 63ce95ab0fSRoss Zwisler static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES]; 64ac401cc7SJan Kara 65ac401cc7SJan Kara static int __init init_dax_wait_table(void) 66ac401cc7SJan Kara { 67ac401cc7SJan Kara int i; 68ac401cc7SJan Kara 69ac401cc7SJan Kara for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++) 70ac401cc7SJan Kara init_waitqueue_head(wait_table + i); 71ac401cc7SJan Kara return 0; 72ac401cc7SJan Kara } 73ac401cc7SJan Kara fs_initcall(init_dax_wait_table); 74ac401cc7SJan Kara 75527b19d0SRoss Zwisler /* 763159f943SMatthew Wilcox * DAX pagecache entries use XArray value entries so they can't be mistaken 773159f943SMatthew Wilcox * for pages. We use one bit for locking, one bit for the entry size (PMD) 783159f943SMatthew Wilcox * and two more to tell us if the entry is a zero page or an empty entry that 793159f943SMatthew Wilcox * is just used for locking. In total four special bits. 80527b19d0SRoss Zwisler * 81527b19d0SRoss Zwisler * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE 82527b19d0SRoss Zwisler * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem 83527b19d0SRoss Zwisler * block allocation. 84527b19d0SRoss Zwisler */ 853159f943SMatthew Wilcox #define DAX_SHIFT (4) 863159f943SMatthew Wilcox #define DAX_LOCKED (1UL << 0) 873159f943SMatthew Wilcox #define DAX_PMD (1UL << 1) 883159f943SMatthew Wilcox #define DAX_ZERO_PAGE (1UL << 2) 893159f943SMatthew Wilcox #define DAX_EMPTY (1UL << 3) 90527b19d0SRoss Zwisler 91a77d19f4SMatthew Wilcox static unsigned long dax_to_pfn(void *entry) 92527b19d0SRoss Zwisler { 933159f943SMatthew Wilcox return xa_to_value(entry) >> DAX_SHIFT; 94527b19d0SRoss Zwisler } 95527b19d0SRoss Zwisler 969f32d221SMatthew Wilcox static void *dax_make_entry(pfn_t pfn, unsigned long flags) 979f32d221SMatthew Wilcox { 989f32d221SMatthew Wilcox return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT)); 999f32d221SMatthew Wilcox } 1009f32d221SMatthew Wilcox 1019f32d221SMatthew Wilcox static void *dax_make_page_entry(struct page *page) 1029f32d221SMatthew Wilcox { 1039f32d221SMatthew Wilcox pfn_t pfn = page_to_pfn_t(page); 1049f32d221SMatthew Wilcox return dax_make_entry(pfn, PageHead(page) ? 
DAX_PMD : 0); 1059f32d221SMatthew Wilcox } 1069f32d221SMatthew Wilcox 107cfc93c6cSMatthew Wilcox static bool dax_is_locked(void *entry) 108cfc93c6cSMatthew Wilcox { 109cfc93c6cSMatthew Wilcox return xa_to_value(entry) & DAX_LOCKED; 110cfc93c6cSMatthew Wilcox } 111cfc93c6cSMatthew Wilcox 112a77d19f4SMatthew Wilcox static unsigned int dax_entry_order(void *entry) 113527b19d0SRoss Zwisler { 1143159f943SMatthew Wilcox if (xa_to_value(entry) & DAX_PMD) 115cfc93c6cSMatthew Wilcox return PMD_ORDER; 116527b19d0SRoss Zwisler return 0; 117527b19d0SRoss Zwisler } 118527b19d0SRoss Zwisler 119642261acSRoss Zwisler static int dax_is_pmd_entry(void *entry) 120642261acSRoss Zwisler { 1213159f943SMatthew Wilcox return xa_to_value(entry) & DAX_PMD; 122642261acSRoss Zwisler } 123642261acSRoss Zwisler 124642261acSRoss Zwisler static int dax_is_pte_entry(void *entry) 125642261acSRoss Zwisler { 1263159f943SMatthew Wilcox return !(xa_to_value(entry) & DAX_PMD); 127642261acSRoss Zwisler } 128642261acSRoss Zwisler 129642261acSRoss Zwisler static int dax_is_zero_entry(void *entry) 130642261acSRoss Zwisler { 1313159f943SMatthew Wilcox return xa_to_value(entry) & DAX_ZERO_PAGE; 132642261acSRoss Zwisler } 133642261acSRoss Zwisler 134642261acSRoss Zwisler static int dax_is_empty_entry(void *entry) 135642261acSRoss Zwisler { 1363159f943SMatthew Wilcox return xa_to_value(entry) & DAX_EMPTY; 137642261acSRoss Zwisler } 138642261acSRoss Zwisler 139f7ca90b1SMatthew Wilcox /* 140a77d19f4SMatthew Wilcox * DAX page cache entry locking 141ac401cc7SJan Kara */ 142ac401cc7SJan Kara struct exceptional_entry_key { 143ec4907ffSMatthew Wilcox struct xarray *xa; 14463e95b5cSRoss Zwisler pgoff_t entry_start; 145ac401cc7SJan Kara }; 146ac401cc7SJan Kara 147ac401cc7SJan Kara struct wait_exceptional_entry_queue { 148ac6424b9SIngo Molnar wait_queue_entry_t wait; 149ac401cc7SJan Kara struct exceptional_entry_key key; 150ac401cc7SJan Kara }; 151ac401cc7SJan Kara 152b15cd800SMatthew Wilcox static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas, 153b15cd800SMatthew Wilcox void *entry, struct exceptional_entry_key *key) 15463e95b5cSRoss Zwisler { 15563e95b5cSRoss Zwisler unsigned long hash; 156b15cd800SMatthew Wilcox unsigned long index = xas->xa_index; 15763e95b5cSRoss Zwisler 15863e95b5cSRoss Zwisler /* 15963e95b5cSRoss Zwisler * If 'entry' is a PMD, align the 'index' that we use for the wait 16063e95b5cSRoss Zwisler * queue to the start of that PMD. This ensures that all offsets in 16163e95b5cSRoss Zwisler * the range covered by the PMD map to the same bit lock. 
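	 * For example, with 4KiB pages and 2MiB PMDs, waiters on any of
	 * the 512 offsets inside one PMD entry end up with the same
	 * 'entry_start' and therefore hash to the same waitqueue, so a
	 * single wake-up covers the whole PMD range.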
16263e95b5cSRoss Zwisler */ 163642261acSRoss Zwisler if (dax_is_pmd_entry(entry)) 164917f3452SRoss Zwisler index &= ~PG_PMD_COLOUR; 165b15cd800SMatthew Wilcox key->xa = xas->xa; 16663e95b5cSRoss Zwisler key->entry_start = index; 16763e95b5cSRoss Zwisler 168b15cd800SMatthew Wilcox hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS); 16963e95b5cSRoss Zwisler return wait_table + hash; 17063e95b5cSRoss Zwisler } 17163e95b5cSRoss Zwisler 172ec4907ffSMatthew Wilcox static int wake_exceptional_entry_func(wait_queue_entry_t *wait, 173ec4907ffSMatthew Wilcox unsigned int mode, int sync, void *keyp) 174ac401cc7SJan Kara { 175ac401cc7SJan Kara struct exceptional_entry_key *key = keyp; 176ac401cc7SJan Kara struct wait_exceptional_entry_queue *ewait = 177ac401cc7SJan Kara container_of(wait, struct wait_exceptional_entry_queue, wait); 178ac401cc7SJan Kara 179ec4907ffSMatthew Wilcox if (key->xa != ewait->key.xa || 18063e95b5cSRoss Zwisler key->entry_start != ewait->key.entry_start) 181ac401cc7SJan Kara return 0; 182ac401cc7SJan Kara return autoremove_wake_function(wait, mode, sync, NULL); 183ac401cc7SJan Kara } 184ac401cc7SJan Kara 185ac401cc7SJan Kara /* 186b93b0163SMatthew Wilcox * @entry may no longer be the entry at the index in the mapping. 187b93b0163SMatthew Wilcox * The important information it's conveying is whether the entry at 188b93b0163SMatthew Wilcox * this index used to be a PMD entry. 189e30331ffSRoss Zwisler */ 190b15cd800SMatthew Wilcox static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all) 191e30331ffSRoss Zwisler { 192e30331ffSRoss Zwisler struct exceptional_entry_key key; 193e30331ffSRoss Zwisler wait_queue_head_t *wq; 194e30331ffSRoss Zwisler 195b15cd800SMatthew Wilcox wq = dax_entry_waitqueue(xas, entry, &key); 196e30331ffSRoss Zwisler 197e30331ffSRoss Zwisler /* 198e30331ffSRoss Zwisler * Checking for locked entry and prepare_to_wait_exclusive() happens 199b93b0163SMatthew Wilcox * under the i_pages lock, ditto for entry handling in our callers. 200e30331ffSRoss Zwisler * So at this point all tasks that could have seen our entry locked 201e30331ffSRoss Zwisler * must be in the waitqueue and the following check will see them. 202e30331ffSRoss Zwisler */ 203e30331ffSRoss Zwisler if (waitqueue_active(wq)) 204e30331ffSRoss Zwisler __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key); 205e30331ffSRoss Zwisler } 206e30331ffSRoss Zwisler 207cfc93c6cSMatthew Wilcox /* 208cfc93c6cSMatthew Wilcox * Look up entry in page cache, wait for it to become unlocked if it 209cfc93c6cSMatthew Wilcox * is a DAX entry and return it. The caller must subsequently call 210cfc93c6cSMatthew Wilcox * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry() 211cfc93c6cSMatthew Wilcox * if it did. 212cfc93c6cSMatthew Wilcox * 213cfc93c6cSMatthew Wilcox * Must be called with the i_pages lock held. 
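 * A typical caller pattern is (sketch based on the callers below):
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas);
 *	... examine or lock the entry ...
 *	put_unlocked_entry(&xas, entry);
 *	xas_unlock_irq(&xas);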
214cfc93c6cSMatthew Wilcox */ 215cfc93c6cSMatthew Wilcox static void *get_unlocked_entry(struct xa_state *xas) 216cfc93c6cSMatthew Wilcox { 217cfc93c6cSMatthew Wilcox void *entry; 218cfc93c6cSMatthew Wilcox struct wait_exceptional_entry_queue ewait; 219cfc93c6cSMatthew Wilcox wait_queue_head_t *wq; 220cfc93c6cSMatthew Wilcox 221cfc93c6cSMatthew Wilcox init_wait(&ewait.wait); 222cfc93c6cSMatthew Wilcox ewait.wait.func = wake_exceptional_entry_func; 223cfc93c6cSMatthew Wilcox 224cfc93c6cSMatthew Wilcox for (;;) { 225cfc93c6cSMatthew Wilcox entry = xas_load(xas); 226cfc93c6cSMatthew Wilcox if (!entry || xa_is_internal(entry) || 227cfc93c6cSMatthew Wilcox WARN_ON_ONCE(!xa_is_value(entry)) || 228cfc93c6cSMatthew Wilcox !dax_is_locked(entry)) 229cfc93c6cSMatthew Wilcox return entry; 230cfc93c6cSMatthew Wilcox 231b15cd800SMatthew Wilcox wq = dax_entry_waitqueue(xas, entry, &ewait.key); 232cfc93c6cSMatthew Wilcox prepare_to_wait_exclusive(wq, &ewait.wait, 233cfc93c6cSMatthew Wilcox TASK_UNINTERRUPTIBLE); 234cfc93c6cSMatthew Wilcox xas_unlock_irq(xas); 235cfc93c6cSMatthew Wilcox xas_reset(xas); 236cfc93c6cSMatthew Wilcox schedule(); 237cfc93c6cSMatthew Wilcox finish_wait(wq, &ewait.wait); 238cfc93c6cSMatthew Wilcox xas_lock_irq(xas); 239cfc93c6cSMatthew Wilcox } 240cfc93c6cSMatthew Wilcox } 241cfc93c6cSMatthew Wilcox 242cfc93c6cSMatthew Wilcox static void put_unlocked_entry(struct xa_state *xas, void *entry) 243cfc93c6cSMatthew Wilcox { 244cfc93c6cSMatthew Wilcox /* If we were the only waiter woken, wake the next one */ 245cfc93c6cSMatthew Wilcox if (entry) 246cfc93c6cSMatthew Wilcox dax_wake_entry(xas, entry, false); 247cfc93c6cSMatthew Wilcox } 248cfc93c6cSMatthew Wilcox 249cfc93c6cSMatthew Wilcox /* 250cfc93c6cSMatthew Wilcox * We used the xa_state to get the entry, but then we locked the entry and 251cfc93c6cSMatthew Wilcox * dropped the xa_lock, so we know the xa_state is stale and must be reset 252cfc93c6cSMatthew Wilcox * before use. 253cfc93c6cSMatthew Wilcox */ 254cfc93c6cSMatthew Wilcox static void dax_unlock_entry(struct xa_state *xas, void *entry) 255cfc93c6cSMatthew Wilcox { 256cfc93c6cSMatthew Wilcox void *old; 257cfc93c6cSMatthew Wilcox 2587ae2ea7dSMatthew Wilcox BUG_ON(dax_is_locked(entry)); 259cfc93c6cSMatthew Wilcox xas_reset(xas); 260cfc93c6cSMatthew Wilcox xas_lock_irq(xas); 261cfc93c6cSMatthew Wilcox old = xas_store(xas, entry); 262cfc93c6cSMatthew Wilcox xas_unlock_irq(xas); 263cfc93c6cSMatthew Wilcox BUG_ON(!dax_is_locked(old)); 264cfc93c6cSMatthew Wilcox dax_wake_entry(xas, entry, false); 265cfc93c6cSMatthew Wilcox } 266cfc93c6cSMatthew Wilcox 267cfc93c6cSMatthew Wilcox /* 268cfc93c6cSMatthew Wilcox * Return: The entry stored at this location before it was locked. 
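 * Context: The caller must already hold the xa_lock; this helper only
 * stores the value with DAX_LOCKED set, it does not take or drop any
 * locks itself.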
269cfc93c6cSMatthew Wilcox */ 270cfc93c6cSMatthew Wilcox static void *dax_lock_entry(struct xa_state *xas, void *entry) 271cfc93c6cSMatthew Wilcox { 272cfc93c6cSMatthew Wilcox unsigned long v = xa_to_value(entry); 273cfc93c6cSMatthew Wilcox return xas_store(xas, xa_mk_value(v | DAX_LOCKED)); 274cfc93c6cSMatthew Wilcox } 275cfc93c6cSMatthew Wilcox 276d2c997c0SDan Williams static unsigned long dax_entry_size(void *entry) 277d2c997c0SDan Williams { 278d2c997c0SDan Williams if (dax_is_zero_entry(entry)) 279d2c997c0SDan Williams return 0; 280d2c997c0SDan Williams else if (dax_is_empty_entry(entry)) 281d2c997c0SDan Williams return 0; 282d2c997c0SDan Williams else if (dax_is_pmd_entry(entry)) 283d2c997c0SDan Williams return PMD_SIZE; 284d2c997c0SDan Williams else 285d2c997c0SDan Williams return PAGE_SIZE; 286d2c997c0SDan Williams } 287d2c997c0SDan Williams 288a77d19f4SMatthew Wilcox static unsigned long dax_end_pfn(void *entry) 289d2c997c0SDan Williams { 290a77d19f4SMatthew Wilcox return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE; 291d2c997c0SDan Williams } 292d2c997c0SDan Williams 293d2c997c0SDan Williams /* 294d2c997c0SDan Williams * Iterate through all mapped pfns represented by an entry, i.e. skip 295d2c997c0SDan Williams * 'empty' and 'zero' entries. 296d2c997c0SDan Williams */ 297d2c997c0SDan Williams #define for_each_mapped_pfn(entry, pfn) \ 298a77d19f4SMatthew Wilcox for (pfn = dax_to_pfn(entry); \ 299a77d19f4SMatthew Wilcox pfn < dax_end_pfn(entry); pfn++) 300d2c997c0SDan Williams 30173449dafSDan Williams /* 30273449dafSDan Williams * TODO: for reflink+dax we need a way to associate a single page with 30373449dafSDan Williams * multiple address_space instances at different linear_page_index() 30473449dafSDan Williams * offsets. 30573449dafSDan Williams */ 30673449dafSDan Williams static void dax_associate_entry(void *entry, struct address_space *mapping, 30773449dafSDan Williams struct vm_area_struct *vma, unsigned long address) 308d2c997c0SDan Williams { 30973449dafSDan Williams unsigned long size = dax_entry_size(entry), pfn, index; 31073449dafSDan Williams int i = 0; 311d2c997c0SDan Williams 312d2c997c0SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 313d2c997c0SDan Williams return; 314d2c997c0SDan Williams 31573449dafSDan Williams index = linear_page_index(vma, address & ~(size - 1)); 316d2c997c0SDan Williams for_each_mapped_pfn(entry, pfn) { 317d2c997c0SDan Williams struct page *page = pfn_to_page(pfn); 318d2c997c0SDan Williams 319d2c997c0SDan Williams WARN_ON_ONCE(page->mapping); 320d2c997c0SDan Williams page->mapping = mapping; 32173449dafSDan Williams page->index = index + i++; 322d2c997c0SDan Williams } 323d2c997c0SDan Williams } 324d2c997c0SDan Williams 325d2c997c0SDan Williams static void dax_disassociate_entry(void *entry, struct address_space *mapping, 326d2c997c0SDan Williams bool trunc) 327d2c997c0SDan Williams { 328d2c997c0SDan Williams unsigned long pfn; 329d2c997c0SDan Williams 330d2c997c0SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 331d2c997c0SDan Williams return; 332d2c997c0SDan Williams 333d2c997c0SDan Williams for_each_mapped_pfn(entry, pfn) { 334d2c997c0SDan Williams struct page *page = pfn_to_page(pfn); 335d2c997c0SDan Williams 336d2c997c0SDan Williams WARN_ON_ONCE(trunc && page_ref_count(page) > 1); 337d2c997c0SDan Williams WARN_ON_ONCE(page->mapping && page->mapping != mapping); 338d2c997c0SDan Williams page->mapping = NULL; 33973449dafSDan Williams page->index = 0; 340d2c997c0SDan Williams } 341d2c997c0SDan Williams } 342d2c997c0SDan 
Williams 3435fac7408SDan Williams static struct page *dax_busy_page(void *entry) 3445fac7408SDan Williams { 3455fac7408SDan Williams unsigned long pfn; 3465fac7408SDan Williams 3475fac7408SDan Williams for_each_mapped_pfn(entry, pfn) { 3485fac7408SDan Williams struct page *page = pfn_to_page(pfn); 3495fac7408SDan Williams 3505fac7408SDan Williams if (page_ref_count(page) > 1) 3515fac7408SDan Williams return page; 3525fac7408SDan Williams } 3535fac7408SDan Williams return NULL; 3545fac7408SDan Williams } 3555fac7408SDan Williams 356c2a7d2a1SDan Williams bool dax_lock_mapping_entry(struct page *page) 357c2a7d2a1SDan Williams { 3589f32d221SMatthew Wilcox XA_STATE(xas, NULL, 0); 3599f32d221SMatthew Wilcox void *entry; 360c2a7d2a1SDan Williams 361c2a7d2a1SDan Williams for (;;) { 3629f32d221SMatthew Wilcox struct address_space *mapping = READ_ONCE(page->mapping); 363c2a7d2a1SDan Williams 364c2a7d2a1SDan Williams if (!dax_mapping(mapping)) 3659f32d221SMatthew Wilcox return false; 366c2a7d2a1SDan Williams 367c2a7d2a1SDan Williams /* 368c2a7d2a1SDan Williams * In the device-dax case there's no need to lock, a 369c2a7d2a1SDan Williams * struct dev_pagemap pin is sufficient to keep the 370c2a7d2a1SDan Williams * inode alive, and we assume we have dev_pagemap pin 371c2a7d2a1SDan Williams * otherwise we would not have a valid pfn_to_page() 372c2a7d2a1SDan Williams * translation. 373c2a7d2a1SDan Williams */ 3749f32d221SMatthew Wilcox if (S_ISCHR(mapping->host->i_mode)) 3759f32d221SMatthew Wilcox return true; 376c2a7d2a1SDan Williams 3779f32d221SMatthew Wilcox xas.xa = &mapping->i_pages; 3789f32d221SMatthew Wilcox xas_lock_irq(&xas); 379c2a7d2a1SDan Williams if (mapping != page->mapping) { 3809f32d221SMatthew Wilcox xas_unlock_irq(&xas); 381c2a7d2a1SDan Williams continue; 382c2a7d2a1SDan Williams } 3839f32d221SMatthew Wilcox xas_set(&xas, page->index); 3849f32d221SMatthew Wilcox entry = xas_load(&xas); 3859f32d221SMatthew Wilcox if (dax_is_locked(entry)) { 3869f32d221SMatthew Wilcox entry = get_unlocked_entry(&xas); 3879f32d221SMatthew Wilcox xas_unlock_irq(&xas); 388c2a7d2a1SDan Williams continue; 389c2a7d2a1SDan Williams } 3909f32d221SMatthew Wilcox dax_lock_entry(&xas, entry); 3919f32d221SMatthew Wilcox xas_unlock_irq(&xas); 3929f32d221SMatthew Wilcox return true; 3939f32d221SMatthew Wilcox } 394c2a7d2a1SDan Williams } 395c2a7d2a1SDan Williams 396c2a7d2a1SDan Williams void dax_unlock_mapping_entry(struct page *page) 397c2a7d2a1SDan Williams { 398c2a7d2a1SDan Williams struct address_space *mapping = page->mapping; 3999f32d221SMatthew Wilcox XA_STATE(xas, &mapping->i_pages, page->index); 400c2a7d2a1SDan Williams 4019f32d221SMatthew Wilcox if (S_ISCHR(mapping->host->i_mode)) 402c2a7d2a1SDan Williams return; 403c2a7d2a1SDan Williams 4049f32d221SMatthew Wilcox dax_unlock_entry(&xas, dax_make_page_entry(page)); 405c2a7d2a1SDan Williams } 406c2a7d2a1SDan Williams 407ac401cc7SJan Kara /* 408a77d19f4SMatthew Wilcox * Find page cache entry at given index. If it is a DAX entry, return it 409a77d19f4SMatthew Wilcox * with the entry locked. If the page cache doesn't contain an entry at 410a77d19f4SMatthew Wilcox * that index, add a locked empty entry. 411ac401cc7SJan Kara * 4123159f943SMatthew Wilcox * When requesting an entry with size DAX_PMD, grab_mapping_entry() will 413b15cd800SMatthew Wilcox * either return that locked entry or will return VM_FAULT_FALLBACK. 414b15cd800SMatthew Wilcox * This will happen if there are any PTE entries within the PMD range 415b15cd800SMatthew Wilcox * that we are requesting. 
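 * (Callers detect this case with xa_is_internal() and turn the entry
 * back into a vm_fault_t with xa_to_internal(), as the fault handlers
 * below do.)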
416642261acSRoss Zwisler * 417b15cd800SMatthew Wilcox * We always favor PTE entries over PMD entries. There isn't a flow where we 418b15cd800SMatthew Wilcox * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD 419b15cd800SMatthew Wilcox * insertion will fail if it finds any PTE entries already in the tree, and a 420b15cd800SMatthew Wilcox * PTE insertion will cause an existing PMD entry to be unmapped and 421b15cd800SMatthew Wilcox * downgraded to PTE entries. This happens for both PMD zero pages as 422b15cd800SMatthew Wilcox * well as PMD empty entries. 423642261acSRoss Zwisler * 424b15cd800SMatthew Wilcox * The exception to this downgrade path is for PMD entries that have 425b15cd800SMatthew Wilcox * real storage backing them. We will leave these real PMD entries in 426b15cd800SMatthew Wilcox * the tree, and PTE writes will simply dirty the entire PMD entry. 427642261acSRoss Zwisler * 428ac401cc7SJan Kara * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For 429ac401cc7SJan Kara * persistent memory the benefit is doubtful. We can add that later if we can 430ac401cc7SJan Kara * show it helps. 431b15cd800SMatthew Wilcox * 432b15cd800SMatthew Wilcox * On error, this function does not return an ERR_PTR. Instead it returns 433b15cd800SMatthew Wilcox * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values 434b15cd800SMatthew Wilcox * overlap with xarray value entries. 435ac401cc7SJan Kara */ 436b15cd800SMatthew Wilcox static void *grab_mapping_entry(struct xa_state *xas, 437b15cd800SMatthew Wilcox struct address_space *mapping, unsigned long size_flag) 438ac401cc7SJan Kara { 439b15cd800SMatthew Wilcox unsigned long index = xas->xa_index; 440b15cd800SMatthew Wilcox bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */ 441b15cd800SMatthew Wilcox void *entry; 442ac401cc7SJan Kara 443b15cd800SMatthew Wilcox retry: 444b15cd800SMatthew Wilcox xas_lock_irq(xas); 445b15cd800SMatthew Wilcox entry = get_unlocked_entry(xas); 446b15cd800SMatthew Wilcox if (xa_is_internal(entry)) 447b15cd800SMatthew Wilcox goto fallback; 448642261acSRoss Zwisler 449b15cd800SMatthew Wilcox if (entry) { 450b15cd800SMatthew Wilcox if (WARN_ON_ONCE(!xa_is_value(entry))) { 451b15cd800SMatthew Wilcox xas_set_err(xas, EIO); 45291d25ba8SRoss Zwisler goto out_unlock; 45391d25ba8SRoss Zwisler } 45491d25ba8SRoss Zwisler 4553159f943SMatthew Wilcox if (size_flag & DAX_PMD) { 45691d25ba8SRoss Zwisler if (dax_is_pte_entry(entry)) { 457b15cd800SMatthew Wilcox put_unlocked_entry(xas, entry); 458b15cd800SMatthew Wilcox goto fallback; 459642261acSRoss Zwisler } 460642261acSRoss Zwisler } else { /* trying to grab a PTE entry */ 46191d25ba8SRoss Zwisler if (dax_is_pmd_entry(entry) && 462642261acSRoss Zwisler (dax_is_zero_entry(entry) || 463642261acSRoss Zwisler dax_is_empty_entry(entry))) { 464642261acSRoss Zwisler pmd_downgrade = true; 465642261acSRoss Zwisler } 466642261acSRoss Zwisler } 467642261acSRoss Zwisler } 468642261acSRoss Zwisler 469642261acSRoss Zwisler if (pmd_downgrade) { 470642261acSRoss Zwisler /* 471642261acSRoss Zwisler * Make sure 'entry' remains valid while we drop 472b93b0163SMatthew Wilcox * the i_pages lock. 473642261acSRoss Zwisler */ 474b15cd800SMatthew Wilcox dax_lock_entry(xas, entry); 475642261acSRoss Zwisler 476642261acSRoss Zwisler /* 477642261acSRoss Zwisler * Besides huge zero pages the only other thing that gets 478642261acSRoss Zwisler * downgraded are empty entries which don't need to be 479642261acSRoss Zwisler * unmapped. 
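		 * (DAX_EMPTY entries are placeholders taken purely for
		 * locking and were never inserted into any page tables.)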
480642261acSRoss Zwisler */ 481b15cd800SMatthew Wilcox if (dax_is_zero_entry(entry)) { 482b15cd800SMatthew Wilcox xas_unlock_irq(xas); 483b15cd800SMatthew Wilcox unmap_mapping_pages(mapping, 484b15cd800SMatthew Wilcox xas->xa_index & ~PG_PMD_COLOUR, 485977fbdcdSMatthew Wilcox PG_PMD_NR, false); 486b15cd800SMatthew Wilcox xas_reset(xas); 487b15cd800SMatthew Wilcox xas_lock_irq(xas); 488e11f8b7bSRoss Zwisler } 489e11f8b7bSRoss Zwisler 490d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, false); 491b15cd800SMatthew Wilcox xas_store(xas, NULL); /* undo the PMD join */ 492b15cd800SMatthew Wilcox dax_wake_entry(xas, entry, true); 493642261acSRoss Zwisler mapping->nrexceptional--; 494b15cd800SMatthew Wilcox entry = NULL; 495b15cd800SMatthew Wilcox xas_set(xas, index); 496642261acSRoss Zwisler } 497642261acSRoss Zwisler 498b15cd800SMatthew Wilcox if (entry) { 499b15cd800SMatthew Wilcox dax_lock_entry(xas, entry); 500b15cd800SMatthew Wilcox } else { 501b15cd800SMatthew Wilcox entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY); 502b15cd800SMatthew Wilcox dax_lock_entry(xas, entry); 503b15cd800SMatthew Wilcox if (xas_error(xas)) 504b15cd800SMatthew Wilcox goto out_unlock; 505ac401cc7SJan Kara mapping->nrexceptional++; 506ac401cc7SJan Kara } 507b15cd800SMatthew Wilcox 508642261acSRoss Zwisler out_unlock: 509b15cd800SMatthew Wilcox xas_unlock_irq(xas); 510b15cd800SMatthew Wilcox if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) 511b15cd800SMatthew Wilcox goto retry; 512b15cd800SMatthew Wilcox if (xas->xa_node == XA_ERROR(-ENOMEM)) 513b15cd800SMatthew Wilcox return xa_mk_internal(VM_FAULT_OOM); 514b15cd800SMatthew Wilcox if (xas_error(xas)) 515b15cd800SMatthew Wilcox return xa_mk_internal(VM_FAULT_SIGBUS); 516e3ad61c6SRoss Zwisler return entry; 517b15cd800SMatthew Wilcox fallback: 518b15cd800SMatthew Wilcox xas_unlock_irq(xas); 519b15cd800SMatthew Wilcox return xa_mk_internal(VM_FAULT_FALLBACK); 520ac401cc7SJan Kara } 521ac401cc7SJan Kara 5225fac7408SDan Williams /** 5235fac7408SDan Williams * dax_layout_busy_page - find first pinned page in @mapping 5245fac7408SDan Williams * @mapping: address space to scan for a page with ref count > 1 5255fac7408SDan Williams * 5265fac7408SDan Williams * DAX requires ZONE_DEVICE mapped pages. These pages are never 5275fac7408SDan Williams * 'onlined' to the page allocator so they are considered idle when 5285fac7408SDan Williams * page->count == 1. A filesystem uses this interface to determine if 5295fac7408SDan Williams * any page in the mapping is busy, i.e. for DMA, or other 5305fac7408SDan Williams * get_user_pages() usages. 5315fac7408SDan Williams * 5325fac7408SDan Williams * It is expected that the filesystem is holding locks to block the 5335fac7408SDan Williams * establishment of new mappings in this address_space. I.e. it expects 5345fac7408SDan Williams * to be able to run unmap_mapping_range() and subsequently not race 5355fac7408SDan Williams * mapping_mapped() becoming true. 5365fac7408SDan Williams */ 5375fac7408SDan Williams struct page *dax_layout_busy_page(struct address_space *mapping) 5385fac7408SDan Williams { 539084a8990SMatthew Wilcox XA_STATE(xas, &mapping->i_pages, 0); 540084a8990SMatthew Wilcox void *entry; 541084a8990SMatthew Wilcox unsigned int scanned = 0; 5425fac7408SDan Williams struct page *page = NULL; 5435fac7408SDan Williams 5445fac7408SDan Williams /* 5455fac7408SDan Williams * In the 'limited' case get_user_pages() for dax is disabled. 
5465fac7408SDan Williams */ 5475fac7408SDan Williams if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 5485fac7408SDan Williams return NULL; 5495fac7408SDan Williams 5505fac7408SDan Williams if (!dax_mapping(mapping) || !mapping_mapped(mapping)) 5515fac7408SDan Williams return NULL; 5525fac7408SDan Williams 5535fac7408SDan Williams /* 5545fac7408SDan Williams * If we race get_user_pages_fast() here either we'll see the 555084a8990SMatthew Wilcox * elevated page count in the iteration and wait, or 5565fac7408SDan Williams * get_user_pages_fast() will see that the page it took a reference 5575fac7408SDan Williams * against is no longer mapped in the page tables and bail to the 5585fac7408SDan Williams * get_user_pages() slow path. The slow path is protected by 5595fac7408SDan Williams * pte_lock() and pmd_lock(). New references are not taken without 5605fac7408SDan Williams * holding those locks, and unmap_mapping_range() will not zero the 5615fac7408SDan Williams * pte or pmd without holding the respective lock, so we are 5625fac7408SDan Williams * guaranteed to either see new references or prevent new 5635fac7408SDan Williams * references from being established. 5645fac7408SDan Williams */ 5655fac7408SDan Williams unmap_mapping_range(mapping, 0, 0, 1); 5665fac7408SDan Williams 567084a8990SMatthew Wilcox xas_lock_irq(&xas); 568084a8990SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 569084a8990SMatthew Wilcox if (WARN_ON_ONCE(!xa_is_value(entry))) 5705fac7408SDan Williams continue; 571084a8990SMatthew Wilcox if (unlikely(dax_is_locked(entry))) 572084a8990SMatthew Wilcox entry = get_unlocked_entry(&xas); 5735fac7408SDan Williams if (entry) 5745fac7408SDan Williams page = dax_busy_page(entry); 575084a8990SMatthew Wilcox put_unlocked_entry(&xas, entry); 5765fac7408SDan Williams if (page) 5775fac7408SDan Williams break; 578084a8990SMatthew Wilcox if (++scanned % XA_CHECK_SCHED) 579084a8990SMatthew Wilcox continue; 580cdbf8897SRoss Zwisler 581084a8990SMatthew Wilcox xas_pause(&xas); 582084a8990SMatthew Wilcox xas_unlock_irq(&xas); 583084a8990SMatthew Wilcox cond_resched(); 584084a8990SMatthew Wilcox xas_lock_irq(&xas); 5855fac7408SDan Williams } 586084a8990SMatthew Wilcox xas_unlock_irq(&xas); 5875fac7408SDan Williams return page; 5885fac7408SDan Williams } 5895fac7408SDan Williams EXPORT_SYMBOL_GPL(dax_layout_busy_page); 5905fac7408SDan Williams 591a77d19f4SMatthew Wilcox static int __dax_invalidate_entry(struct address_space *mapping, 592c6dcf52cSJan Kara pgoff_t index, bool trunc) 593c6dcf52cSJan Kara { 59407f2d89cSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, index); 595c6dcf52cSJan Kara int ret = 0; 596c6dcf52cSJan Kara void *entry; 597c6dcf52cSJan Kara 59807f2d89cSMatthew Wilcox xas_lock_irq(&xas); 59907f2d89cSMatthew Wilcox entry = get_unlocked_entry(&xas); 6003159f943SMatthew Wilcox if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) 601c6dcf52cSJan Kara goto out; 602c6dcf52cSJan Kara if (!trunc && 60307f2d89cSMatthew Wilcox (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) || 60407f2d89cSMatthew Wilcox xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE))) 605c6dcf52cSJan Kara goto out; 606d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, trunc); 60707f2d89cSMatthew Wilcox xas_store(&xas, NULL); 608c6dcf52cSJan Kara mapping->nrexceptional--; 609c6dcf52cSJan Kara ret = 1; 610c6dcf52cSJan Kara out: 61107f2d89cSMatthew Wilcox put_unlocked_entry(&xas, entry); 61207f2d89cSMatthew Wilcox xas_unlock_irq(&xas); 613c6dcf52cSJan Kara return ret; 614c6dcf52cSJan Kara } 61507f2d89cSMatthew Wilcox 616ac401cc7SJan 
Kara /* 6173159f943SMatthew Wilcox * Delete DAX entry at @index from @mapping. Wait for it 6183159f943SMatthew Wilcox * to be unlocked before deleting it. 619ac401cc7SJan Kara */ 620ac401cc7SJan Kara int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) 621ac401cc7SJan Kara { 622a77d19f4SMatthew Wilcox int ret = __dax_invalidate_entry(mapping, index, true); 623ac401cc7SJan Kara 624ac401cc7SJan Kara /* 625ac401cc7SJan Kara * This gets called from truncate / punch_hole path. As such, the caller 626ac401cc7SJan Kara * must hold locks protecting against concurrent modifications of the 627a77d19f4SMatthew Wilcox * page cache (usually fs-private i_mmap_sem for writing). Since the 6283159f943SMatthew Wilcox * caller has seen a DAX entry for this index, we better find it 629ac401cc7SJan Kara * at that index as well... 630ac401cc7SJan Kara */ 631c6dcf52cSJan Kara WARN_ON_ONCE(!ret); 632c6dcf52cSJan Kara return ret; 633ac401cc7SJan Kara } 634ac401cc7SJan Kara 635c6dcf52cSJan Kara /* 6363159f943SMatthew Wilcox * Invalidate DAX entry if it is clean. 637c6dcf52cSJan Kara */ 638c6dcf52cSJan Kara int dax_invalidate_mapping_entry_sync(struct address_space *mapping, 639c6dcf52cSJan Kara pgoff_t index) 640c6dcf52cSJan Kara { 641a77d19f4SMatthew Wilcox return __dax_invalidate_entry(mapping, index, false); 642ac401cc7SJan Kara } 643ac401cc7SJan Kara 644cccbce67SDan Williams static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev, 645cccbce67SDan Williams sector_t sector, size_t size, struct page *to, 646cccbce67SDan Williams unsigned long vaddr) 647f7ca90b1SMatthew Wilcox { 648cccbce67SDan Williams void *vto, *kaddr; 649cccbce67SDan Williams pgoff_t pgoff; 650cccbce67SDan Williams long rc; 651cccbce67SDan Williams int id; 652e2e05394SRoss Zwisler 653cccbce67SDan Williams rc = bdev_dax_pgoff(bdev, sector, size, &pgoff); 654cccbce67SDan Williams if (rc) 655cccbce67SDan Williams return rc; 656cccbce67SDan Williams 657cccbce67SDan Williams id = dax_read_lock(); 65886ed913bSHuaisheng Ye rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL); 659cccbce67SDan Williams if (rc < 0) { 660cccbce67SDan Williams dax_read_unlock(id); 661cccbce67SDan Williams return rc; 662cccbce67SDan Williams } 663f7ca90b1SMatthew Wilcox vto = kmap_atomic(to); 664cccbce67SDan Williams copy_user_page(vto, (void __force *)kaddr, vaddr, to); 665f7ca90b1SMatthew Wilcox kunmap_atomic(vto); 666cccbce67SDan Williams dax_read_unlock(id); 667f7ca90b1SMatthew Wilcox return 0; 668f7ca90b1SMatthew Wilcox } 669f7ca90b1SMatthew Wilcox 670642261acSRoss Zwisler /* 671642261acSRoss Zwisler * By this point grab_mapping_entry() has ensured that we have a locked entry 672642261acSRoss Zwisler * of the appropriate size so we don't have to worry about downgrading PMDs to 673642261acSRoss Zwisler * PTEs. If we happen to be trying to insert a PTE and there is a PMD 674642261acSRoss Zwisler * already in the tree, we will skip the insertion and just dirty the PMD as 675642261acSRoss Zwisler * appropriate. 
676642261acSRoss Zwisler */ 677b15cd800SMatthew Wilcox static void *dax_insert_entry(struct xa_state *xas, 678b15cd800SMatthew Wilcox struct address_space *mapping, struct vm_fault *vmf, 679b15cd800SMatthew Wilcox void *entry, pfn_t pfn, unsigned long flags, bool dirty) 6809973c98eSRoss Zwisler { 681b15cd800SMatthew Wilcox void *new_entry = dax_make_entry(pfn, flags); 6829973c98eSRoss Zwisler 683f5b7b748SJan Kara if (dirty) 6849973c98eSRoss Zwisler __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 6859973c98eSRoss Zwisler 6863159f943SMatthew Wilcox if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) { 687b15cd800SMatthew Wilcox unsigned long index = xas->xa_index; 68891d25ba8SRoss Zwisler /* we are replacing a zero page with block mapping */ 68991d25ba8SRoss Zwisler if (dax_is_pmd_entry(entry)) 690977fbdcdSMatthew Wilcox unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, 691977fbdcdSMatthew Wilcox PG_PMD_NR, false); 69291d25ba8SRoss Zwisler else /* pte entry */ 693b15cd800SMatthew Wilcox unmap_mapping_pages(mapping, index, 1, false); 694ac401cc7SJan Kara } 6959973c98eSRoss Zwisler 696b15cd800SMatthew Wilcox xas_reset(xas); 697b15cd800SMatthew Wilcox xas_lock_irq(xas); 698d2c997c0SDan Williams if (dax_entry_size(entry) != dax_entry_size(new_entry)) { 699d2c997c0SDan Williams dax_disassociate_entry(entry, mapping, false); 70073449dafSDan Williams dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); 701d2c997c0SDan Williams } 702642261acSRoss Zwisler 70391d25ba8SRoss Zwisler if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { 704642261acSRoss Zwisler /* 705a77d19f4SMatthew Wilcox * Only swap our new entry into the page cache if the current 706642261acSRoss Zwisler * entry is a zero page or an empty entry. If a normal PTE or 707a77d19f4SMatthew Wilcox * PMD entry is already in the cache, we leave it alone. This 708642261acSRoss Zwisler * means that if we are trying to insert a PTE and the 709642261acSRoss Zwisler * existing entry is a PMD, we will just leave the PMD in the 710642261acSRoss Zwisler * tree and dirty it if necessary. 
711642261acSRoss Zwisler */ 712b15cd800SMatthew Wilcox void *old = dax_lock_entry(xas, new_entry); 713b15cd800SMatthew Wilcox WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | 714b15cd800SMatthew Wilcox DAX_LOCKED)); 71591d25ba8SRoss Zwisler entry = new_entry; 716b15cd800SMatthew Wilcox } else { 717b15cd800SMatthew Wilcox xas_load(xas); /* Walk the xa_state */ 718ac401cc7SJan Kara } 71991d25ba8SRoss Zwisler 720f5b7b748SJan Kara if (dirty) 721b15cd800SMatthew Wilcox xas_set_mark(xas, PAGECACHE_TAG_DIRTY); 72291d25ba8SRoss Zwisler 723b15cd800SMatthew Wilcox xas_unlock_irq(xas); 72491d25ba8SRoss Zwisler return entry; 7259973c98eSRoss Zwisler } 7269973c98eSRoss Zwisler 727a77d19f4SMatthew Wilcox static inline 728a77d19f4SMatthew Wilcox unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma) 7294b4bb46dSJan Kara { 7304b4bb46dSJan Kara unsigned long address; 7314b4bb46dSJan Kara 7324b4bb46dSJan Kara address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 7334b4bb46dSJan Kara VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); 7344b4bb46dSJan Kara return address; 7354b4bb46dSJan Kara } 7364b4bb46dSJan Kara 7374b4bb46dSJan Kara /* Walk all mappings of a given index of a file and writeprotect them */ 738a77d19f4SMatthew Wilcox static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index, 739a77d19f4SMatthew Wilcox unsigned long pfn) 7404b4bb46dSJan Kara { 7414b4bb46dSJan Kara struct vm_area_struct *vma; 742f729c8c9SRoss Zwisler pte_t pte, *ptep = NULL; 743f729c8c9SRoss Zwisler pmd_t *pmdp = NULL; 7444b4bb46dSJan Kara spinlock_t *ptl; 7454b4bb46dSJan Kara 7464b4bb46dSJan Kara i_mmap_lock_read(mapping); 7474b4bb46dSJan Kara vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { 748a4d1a885SJérôme Glisse unsigned long address, start, end; 7494b4bb46dSJan Kara 7504b4bb46dSJan Kara cond_resched(); 7514b4bb46dSJan Kara 7524b4bb46dSJan Kara if (!(vma->vm_flags & VM_SHARED)) 7534b4bb46dSJan Kara continue; 7544b4bb46dSJan Kara 7554b4bb46dSJan Kara address = pgoff_address(index, vma); 756a4d1a885SJérôme Glisse 757a4d1a885SJérôme Glisse /* 758a4d1a885SJérôme Glisse * Note because we provide start/end to follow_pte_pmd it will 759a4d1a885SJérôme Glisse * call mmu_notifier_invalidate_range_start() on our behalf 760a4d1a885SJérôme Glisse * before taking any lock. 761a4d1a885SJérôme Glisse */ 762a4d1a885SJérôme Glisse if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl)) 7634b4bb46dSJan Kara continue; 764f729c8c9SRoss Zwisler 7650f10851eSJérôme Glisse /* 7660f10851eSJérôme Glisse * No need to call mmu_notifier_invalidate_range() as we are 7670f10851eSJérôme Glisse * downgrading page table protection not changing it to point 7680f10851eSJérôme Glisse * to a new page. 
7690f10851eSJérôme Glisse * 770ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 7710f10851eSJérôme Glisse */ 772f729c8c9SRoss Zwisler if (pmdp) { 773f729c8c9SRoss Zwisler #ifdef CONFIG_FS_DAX_PMD 774f729c8c9SRoss Zwisler pmd_t pmd; 775f729c8c9SRoss Zwisler 776f729c8c9SRoss Zwisler if (pfn != pmd_pfn(*pmdp)) 777f729c8c9SRoss Zwisler goto unlock_pmd; 778f6f37321SLinus Torvalds if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp)) 779f729c8c9SRoss Zwisler goto unlock_pmd; 780f729c8c9SRoss Zwisler 781f729c8c9SRoss Zwisler flush_cache_page(vma, address, pfn); 782f729c8c9SRoss Zwisler pmd = pmdp_huge_clear_flush(vma, address, pmdp); 783f729c8c9SRoss Zwisler pmd = pmd_wrprotect(pmd); 784f729c8c9SRoss Zwisler pmd = pmd_mkclean(pmd); 785f729c8c9SRoss Zwisler set_pmd_at(vma->vm_mm, address, pmdp, pmd); 786f729c8c9SRoss Zwisler unlock_pmd: 787f729c8c9SRoss Zwisler #endif 788ee190ca6SJan H. Schönherr spin_unlock(ptl); 789f729c8c9SRoss Zwisler } else { 7904b4bb46dSJan Kara if (pfn != pte_pfn(*ptep)) 791f729c8c9SRoss Zwisler goto unlock_pte; 7924b4bb46dSJan Kara if (!pte_dirty(*ptep) && !pte_write(*ptep)) 793f729c8c9SRoss Zwisler goto unlock_pte; 7944b4bb46dSJan Kara 7954b4bb46dSJan Kara flush_cache_page(vma, address, pfn); 7964b4bb46dSJan Kara pte = ptep_clear_flush(vma, address, ptep); 7974b4bb46dSJan Kara pte = pte_wrprotect(pte); 7984b4bb46dSJan Kara pte = pte_mkclean(pte); 7994b4bb46dSJan Kara set_pte_at(vma->vm_mm, address, ptep, pte); 800f729c8c9SRoss Zwisler unlock_pte: 8014b4bb46dSJan Kara pte_unmap_unlock(ptep, ptl); 802f729c8c9SRoss Zwisler } 8034b4bb46dSJan Kara 804a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); 8054b4bb46dSJan Kara } 8064b4bb46dSJan Kara i_mmap_unlock_read(mapping); 8074b4bb46dSJan Kara } 8084b4bb46dSJan Kara 8099fc747f6SMatthew Wilcox static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, 8109fc747f6SMatthew Wilcox struct address_space *mapping, void *entry) 8119973c98eSRoss Zwisler { 8123fe0791cSDan Williams unsigned long pfn; 8133fe0791cSDan Williams long ret = 0; 814cccbce67SDan Williams size_t size; 8159973c98eSRoss Zwisler 8169973c98eSRoss Zwisler /* 817a6abc2c0SJan Kara * A page got tagged dirty in DAX mapping? Something is seriously 818a6abc2c0SJan Kara * wrong. 8199973c98eSRoss Zwisler */ 8203159f943SMatthew Wilcox if (WARN_ON(!xa_is_value(entry))) 821a6abc2c0SJan Kara return -EIO; 8229973c98eSRoss Zwisler 8239fc747f6SMatthew Wilcox if (unlikely(dax_is_locked(entry))) { 8249fc747f6SMatthew Wilcox void *old_entry = entry; 8259fc747f6SMatthew Wilcox 8269fc747f6SMatthew Wilcox entry = get_unlocked_entry(xas); 8279fc747f6SMatthew Wilcox 828a6abc2c0SJan Kara /* Entry got punched out / reallocated? */ 8299fc747f6SMatthew Wilcox if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) 830a6abc2c0SJan Kara goto put_unlocked; 831a6abc2c0SJan Kara /* 8329fc747f6SMatthew Wilcox * Entry got reallocated elsewhere? No need to writeback. 8339fc747f6SMatthew Wilcox * We have to compare pfns as we must not bail out due to 8349fc747f6SMatthew Wilcox * difference in lockbit or entry type. 
835a6abc2c0SJan Kara */ 8369fc747f6SMatthew Wilcox if (dax_to_pfn(old_entry) != dax_to_pfn(entry)) 837a6abc2c0SJan Kara goto put_unlocked; 838642261acSRoss Zwisler if (WARN_ON_ONCE(dax_is_empty_entry(entry) || 839642261acSRoss Zwisler dax_is_zero_entry(entry))) { 8409973c98eSRoss Zwisler ret = -EIO; 841a6abc2c0SJan Kara goto put_unlocked; 8429973c98eSRoss Zwisler } 8439973c98eSRoss Zwisler 8449fc747f6SMatthew Wilcox /* Another fsync thread may have already done this entry */ 8459fc747f6SMatthew Wilcox if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) 846a6abc2c0SJan Kara goto put_unlocked; 8479fc747f6SMatthew Wilcox } 8489fc747f6SMatthew Wilcox 849a6abc2c0SJan Kara /* Lock the entry to serialize with page faults */ 8509fc747f6SMatthew Wilcox dax_lock_entry(xas, entry); 8519fc747f6SMatthew Wilcox 852a6abc2c0SJan Kara /* 853a6abc2c0SJan Kara * We can clear the tag now but we have to be careful so that concurrent 854a6abc2c0SJan Kara * dax_writeback_one() calls for the same index cannot finish before we 855a6abc2c0SJan Kara * actually flush the caches. This is achieved as the calls will look 856b93b0163SMatthew Wilcox * at the entry only under the i_pages lock and once they do that 857b93b0163SMatthew Wilcox * they will see the entry locked and wait for it to unlock. 858a6abc2c0SJan Kara */ 8599fc747f6SMatthew Wilcox xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE); 8609fc747f6SMatthew Wilcox xas_unlock_irq(xas); 861a6abc2c0SJan Kara 862642261acSRoss Zwisler /* 863642261acSRoss Zwisler * Even if dax_writeback_mapping_range() was given a wbc->range_start 864642261acSRoss Zwisler * in the middle of a PMD, the 'index' we are given will be aligned to 8653fe0791cSDan Williams * the start index of the PMD, as will the pfn we pull from 'entry'. 8663fe0791cSDan Williams * This allows us to flush for PMD_SIZE and not have to worry about 8673fe0791cSDan Williams * partial PMD writebacks. 868642261acSRoss Zwisler */ 869a77d19f4SMatthew Wilcox pfn = dax_to_pfn(entry); 870a77d19f4SMatthew Wilcox size = PAGE_SIZE << dax_entry_order(entry); 871cccbce67SDan Williams 8729fc747f6SMatthew Wilcox dax_entry_mkclean(mapping, xas->xa_index, pfn); 8733fe0791cSDan Williams dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size); 8744b4bb46dSJan Kara /* 8754b4bb46dSJan Kara * After we have flushed the cache, we can clear the dirty tag. There 8764b4bb46dSJan Kara * cannot be new dirty data in the pfn after the flush has completed as 8774b4bb46dSJan Kara * the pfn mappings are writeprotected and fault waits for mapping 8784b4bb46dSJan Kara * entry lock. 8794b4bb46dSJan Kara */ 8809fc747f6SMatthew Wilcox xas_reset(xas); 8819fc747f6SMatthew Wilcox xas_lock_irq(xas); 8829fc747f6SMatthew Wilcox xas_store(xas, entry); 8839fc747f6SMatthew Wilcox xas_clear_mark(xas, PAGECACHE_TAG_DIRTY); 8849fc747f6SMatthew Wilcox dax_wake_entry(xas, entry, false); 8859fc747f6SMatthew Wilcox 8869fc747f6SMatthew Wilcox trace_dax_writeback_one(mapping->host, xas->xa_index, 8879fc747f6SMatthew Wilcox size >> PAGE_SHIFT); 8889973c98eSRoss Zwisler return ret; 8899973c98eSRoss Zwisler 890a6abc2c0SJan Kara put_unlocked: 8919fc747f6SMatthew Wilcox put_unlocked_entry(xas, entry); 8929973c98eSRoss Zwisler return ret; 8939973c98eSRoss Zwisler } 8949973c98eSRoss Zwisler 8959973c98eSRoss Zwisler /* 8969973c98eSRoss Zwisler * Flush the mapping to the persistent domain within the byte range of [start, 8979973c98eSRoss Zwisler * end]. 
This is required by data integrity operations to ensure file data is 8989973c98eSRoss Zwisler * on persistent storage prior to completion of the operation. 8999973c98eSRoss Zwisler */ 9007f6d5b52SRoss Zwisler int dax_writeback_mapping_range(struct address_space *mapping, 9017f6d5b52SRoss Zwisler struct block_device *bdev, struct writeback_control *wbc) 9029973c98eSRoss Zwisler { 9039fc747f6SMatthew Wilcox XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); 9049973c98eSRoss Zwisler struct inode *inode = mapping->host; 9059fc747f6SMatthew Wilcox pgoff_t end_index = wbc->range_end >> PAGE_SHIFT; 906cccbce67SDan Williams struct dax_device *dax_dev; 9079fc747f6SMatthew Wilcox void *entry; 9089fc747f6SMatthew Wilcox int ret = 0; 9099fc747f6SMatthew Wilcox unsigned int scanned = 0; 9109973c98eSRoss Zwisler 9119973c98eSRoss Zwisler if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) 9129973c98eSRoss Zwisler return -EIO; 9139973c98eSRoss Zwisler 9147f6d5b52SRoss Zwisler if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) 9157f6d5b52SRoss Zwisler return 0; 9167f6d5b52SRoss Zwisler 917cccbce67SDan Williams dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 918cccbce67SDan Williams if (!dax_dev) 919cccbce67SDan Williams return -EIO; 920cccbce67SDan Williams 9219fc747f6SMatthew Wilcox trace_dax_writeback_range(inode, xas.xa_index, end_index); 9229973c98eSRoss Zwisler 9239fc747f6SMatthew Wilcox tag_pages_for_writeback(mapping, xas.xa_index, end_index); 924d14a3f48SRoss Zwisler 9259fc747f6SMatthew Wilcox xas_lock_irq(&xas); 9269fc747f6SMatthew Wilcox xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { 9279fc747f6SMatthew Wilcox ret = dax_writeback_one(&xas, dax_dev, mapping, entry); 928819ec6b9SJeff Layton if (ret < 0) { 929819ec6b9SJeff Layton mapping_set_error(mapping, ret); 9309fc747f6SMatthew Wilcox break; 931d14a3f48SRoss Zwisler } 9329fc747f6SMatthew Wilcox if (++scanned % XA_CHECK_SCHED) 9339fc747f6SMatthew Wilcox continue; 9349fc747f6SMatthew Wilcox 9359fc747f6SMatthew Wilcox xas_pause(&xas); 9369fc747f6SMatthew Wilcox xas_unlock_irq(&xas); 9379fc747f6SMatthew Wilcox cond_resched(); 9389fc747f6SMatthew Wilcox xas_lock_irq(&xas); 939d14a3f48SRoss Zwisler } 9409fc747f6SMatthew Wilcox xas_unlock_irq(&xas); 941cccbce67SDan Williams put_dax(dax_dev); 9429fc747f6SMatthew Wilcox trace_dax_writeback_range_done(inode, xas.xa_index, end_index); 9439fc747f6SMatthew Wilcox return ret; 9449973c98eSRoss Zwisler } 9459973c98eSRoss Zwisler EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); 9469973c98eSRoss Zwisler 94731a6f1a6SJan Kara static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) 948f7ca90b1SMatthew Wilcox { 949a3841f94SLinus Torvalds return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9; 95031a6f1a6SJan Kara } 951f7ca90b1SMatthew Wilcox 9525e161e40SJan Kara static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size, 9535e161e40SJan Kara pfn_t *pfnp) 9545e161e40SJan Kara { 9555e161e40SJan Kara const sector_t sector = dax_iomap_sector(iomap, pos); 9565e161e40SJan Kara pgoff_t pgoff; 9575e161e40SJan Kara int id, rc; 9585e161e40SJan Kara long length; 9595e161e40SJan Kara 9605e161e40SJan Kara rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff); 961cccbce67SDan Williams if (rc) 962cccbce67SDan Williams return rc; 963cccbce67SDan Williams id = dax_read_lock(); 9645e161e40SJan Kara length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), 96586ed913bSHuaisheng Ye NULL, pfnp); 9665e161e40SJan Kara if (length < 0) { 9675e161e40SJan 
Kara rc = length; 9685e161e40SJan Kara goto out; 9695e161e40SJan Kara } 9705e161e40SJan Kara rc = -EINVAL; 9715e161e40SJan Kara if (PFN_PHYS(length) < size) 9725e161e40SJan Kara goto out; 9735e161e40SJan Kara if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1)) 9745e161e40SJan Kara goto out; 9755e161e40SJan Kara /* For larger pages we need devmap */ 9765e161e40SJan Kara if (length > 1 && !pfn_t_devmap(*pfnp)) 9775e161e40SJan Kara goto out; 9785e161e40SJan Kara rc = 0; 9795e161e40SJan Kara out: 980cccbce67SDan Williams dax_read_unlock(id); 981cccbce67SDan Williams return rc; 982cccbce67SDan Williams } 983f7ca90b1SMatthew Wilcox 9842f89dc12SJan Kara /* 98591d25ba8SRoss Zwisler * The user has performed a load from a hole in the file. Allocating a new 98691d25ba8SRoss Zwisler * page in the file would cause excessive storage usage for workloads with 98791d25ba8SRoss Zwisler * sparse files. Instead we insert a read-only mapping of the 4k zero page. 98891d25ba8SRoss Zwisler * If this page is ever written to we will re-fault and change the mapping to 98991d25ba8SRoss Zwisler * point to real DAX storage instead. 9902f89dc12SJan Kara */ 991b15cd800SMatthew Wilcox static vm_fault_t dax_load_hole(struct xa_state *xas, 992b15cd800SMatthew Wilcox struct address_space *mapping, void **entry, 993e30331ffSRoss Zwisler struct vm_fault *vmf) 994e30331ffSRoss Zwisler { 995e30331ffSRoss Zwisler struct inode *inode = mapping->host; 99691d25ba8SRoss Zwisler unsigned long vaddr = vmf->address; 997b90ca5ccSMatthew Wilcox pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); 998b90ca5ccSMatthew Wilcox vm_fault_t ret; 999e30331ffSRoss Zwisler 1000b15cd800SMatthew Wilcox *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, 10013159f943SMatthew Wilcox DAX_ZERO_PAGE, false); 10023159f943SMatthew Wilcox 1003ab77dab4SSouptick Joarder ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); 1004e30331ffSRoss Zwisler trace_dax_load_hole(inode, vmf, ret); 1005e30331ffSRoss Zwisler return ret; 1006e30331ffSRoss Zwisler } 1007e30331ffSRoss Zwisler 10084b0228faSVishal Verma static bool dax_range_is_aligned(struct block_device *bdev, 10094b0228faSVishal Verma unsigned int offset, unsigned int length) 10104b0228faSVishal Verma { 10114b0228faSVishal Verma unsigned short sector_size = bdev_logical_block_size(bdev); 10124b0228faSVishal Verma 10134b0228faSVishal Verma if (!IS_ALIGNED(offset, sector_size)) 10144b0228faSVishal Verma return false; 10154b0228faSVishal Verma if (!IS_ALIGNED(length, sector_size)) 10164b0228faSVishal Verma return false; 10174b0228faSVishal Verma 10184b0228faSVishal Verma return true; 10194b0228faSVishal Verma } 10204b0228faSVishal Verma 1021cccbce67SDan Williams int __dax_zero_page_range(struct block_device *bdev, 1022cccbce67SDan Williams struct dax_device *dax_dev, sector_t sector, 1023cccbce67SDan Williams unsigned int offset, unsigned int size) 1024679c8bd3SChristoph Hellwig { 1025cccbce67SDan Williams if (dax_range_is_aligned(bdev, offset, size)) { 1026cccbce67SDan Williams sector_t start_sector = sector + (offset >> 9); 10274b0228faSVishal Verma 10284b0228faSVishal Verma return blkdev_issue_zeroout(bdev, start_sector, 102953ef7d0eSLinus Torvalds size >> 9, GFP_NOFS, 0); 10304b0228faSVishal Verma } else { 1031cccbce67SDan Williams pgoff_t pgoff; 1032cccbce67SDan Williams long rc, id; 1033cccbce67SDan Williams void *kaddr; 1034cccbce67SDan Williams 1035e84b83b9SDan Williams rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff); 1036cccbce67SDan Williams if (rc) 1037cccbce67SDan Williams return rc; 1038cccbce67SDan Williams 
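		/*
		 * Partial-block zeroing: map the page containing the range
		 * with dax_direct_access(), clear the bytes in place and
		 * flush them so the zeroes reach the persistent media.
		 */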
1039cccbce67SDan Williams id = dax_read_lock(); 104086ed913bSHuaisheng Ye rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL); 1041cccbce67SDan Williams if (rc < 0) { 1042cccbce67SDan Williams dax_read_unlock(id); 1043cccbce67SDan Williams return rc; 1044cccbce67SDan Williams } 104581f55870SDan Williams memset(kaddr + offset, 0, size); 1046c3ca015fSMikulas Patocka dax_flush(dax_dev, kaddr + offset, size); 1047cccbce67SDan Williams dax_read_unlock(id); 10484b0228faSVishal Verma } 1049679c8bd3SChristoph Hellwig return 0; 1050679c8bd3SChristoph Hellwig } 1051679c8bd3SChristoph Hellwig EXPORT_SYMBOL_GPL(__dax_zero_page_range); 1052679c8bd3SChristoph Hellwig 1053a254e568SChristoph Hellwig static loff_t 105411c59c92SRoss Zwisler dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, 1055a254e568SChristoph Hellwig struct iomap *iomap) 1056a254e568SChristoph Hellwig { 1057cccbce67SDan Williams struct block_device *bdev = iomap->bdev; 1058cccbce67SDan Williams struct dax_device *dax_dev = iomap->dax_dev; 1059a254e568SChristoph Hellwig struct iov_iter *iter = data; 1060a254e568SChristoph Hellwig loff_t end = pos + length, done = 0; 1061a254e568SChristoph Hellwig ssize_t ret = 0; 1062a77d4786SDan Williams size_t xfer; 1063cccbce67SDan Williams int id; 1064a254e568SChristoph Hellwig 1065a254e568SChristoph Hellwig if (iov_iter_rw(iter) == READ) { 1066a254e568SChristoph Hellwig end = min(end, i_size_read(inode)); 1067a254e568SChristoph Hellwig if (pos >= end) 1068a254e568SChristoph Hellwig return 0; 1069a254e568SChristoph Hellwig 1070a254e568SChristoph Hellwig if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) 1071a254e568SChristoph Hellwig return iov_iter_zero(min(length, end - pos), iter); 1072a254e568SChristoph Hellwig } 1073a254e568SChristoph Hellwig 1074a254e568SChristoph Hellwig if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) 1075a254e568SChristoph Hellwig return -EIO; 1076a254e568SChristoph Hellwig 1077e3fce68cSJan Kara /* 1078e3fce68cSJan Kara * Write can allocate block for an area which has a hole page mapped 1079e3fce68cSJan Kara * into page tables. We have to tear down these mappings so that data 1080e3fce68cSJan Kara * written by write(2) is visible in mmap. 
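	 * (IOMAP_F_NEW below means the filesystem has just allocated blocks
	 * for this range, so any zero-page mappings over the former hole
	 * are now stale and must go.)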
1081e3fce68cSJan Kara */ 1082cd656375SJan Kara if (iomap->flags & IOMAP_F_NEW) { 1083e3fce68cSJan Kara invalidate_inode_pages2_range(inode->i_mapping, 1084e3fce68cSJan Kara pos >> PAGE_SHIFT, 1085e3fce68cSJan Kara (end - 1) >> PAGE_SHIFT); 1086e3fce68cSJan Kara } 1087e3fce68cSJan Kara 1088cccbce67SDan Williams id = dax_read_lock(); 1089a254e568SChristoph Hellwig while (pos < end) { 1090a254e568SChristoph Hellwig unsigned offset = pos & (PAGE_SIZE - 1); 1091cccbce67SDan Williams const size_t size = ALIGN(length + offset, PAGE_SIZE); 1092cccbce67SDan Williams const sector_t sector = dax_iomap_sector(iomap, pos); 1093a254e568SChristoph Hellwig ssize_t map_len; 1094cccbce67SDan Williams pgoff_t pgoff; 1095cccbce67SDan Williams void *kaddr; 1096a254e568SChristoph Hellwig 1097d1908f52SMichal Hocko if (fatal_signal_pending(current)) { 1098d1908f52SMichal Hocko ret = -EINTR; 1099d1908f52SMichal Hocko break; 1100d1908f52SMichal Hocko } 1101d1908f52SMichal Hocko 1102cccbce67SDan Williams ret = bdev_dax_pgoff(bdev, sector, size, &pgoff); 1103cccbce67SDan Williams if (ret) 1104cccbce67SDan Williams break; 1105cccbce67SDan Williams 1106cccbce67SDan Williams map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), 110786ed913bSHuaisheng Ye &kaddr, NULL); 1108a254e568SChristoph Hellwig if (map_len < 0) { 1109a254e568SChristoph Hellwig ret = map_len; 1110a254e568SChristoph Hellwig break; 1111a254e568SChristoph Hellwig } 1112a254e568SChristoph Hellwig 1113cccbce67SDan Williams map_len = PFN_PHYS(map_len); 1114cccbce67SDan Williams kaddr += offset; 1115a254e568SChristoph Hellwig map_len -= offset; 1116a254e568SChristoph Hellwig if (map_len > end - pos) 1117a254e568SChristoph Hellwig map_len = end - pos; 1118a254e568SChristoph Hellwig 1119a2e050f5SRoss Zwisler /* 1120a2e050f5SRoss Zwisler * The userspace address for the memory copy has already been 1121a2e050f5SRoss Zwisler * validated via access_ok() in either vfs_read() or 1122a2e050f5SRoss Zwisler * vfs_write(), depending on which operation we are doing. 1123a2e050f5SRoss Zwisler */ 1124a254e568SChristoph Hellwig if (iov_iter_rw(iter) == WRITE) 1125a77d4786SDan Williams xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, 1126fec53774SDan Williams map_len, iter); 1127a254e568SChristoph Hellwig else 1128a77d4786SDan Williams xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr, 1129b3a9a0c3SDan Williams map_len, iter); 1130a254e568SChristoph Hellwig 1131a77d4786SDan Williams pos += xfer; 1132a77d4786SDan Williams length -= xfer; 1133a77d4786SDan Williams done += xfer; 1134a77d4786SDan Williams 1135a77d4786SDan Williams if (xfer == 0) 1136a77d4786SDan Williams ret = -EFAULT; 1137a77d4786SDan Williams if (xfer < map_len) 1138a77d4786SDan Williams break; 1139a254e568SChristoph Hellwig } 1140cccbce67SDan Williams dax_read_unlock(id); 1141a254e568SChristoph Hellwig 1142a254e568SChristoph Hellwig return done ? done : ret; 1143a254e568SChristoph Hellwig } 1144a254e568SChristoph Hellwig 1145a254e568SChristoph Hellwig /** 114611c59c92SRoss Zwisler * dax_iomap_rw - Perform I/O to a DAX file 1147a254e568SChristoph Hellwig * @iocb: The control block for this I/O 1148a254e568SChristoph Hellwig * @iter: The addresses to do I/O from or to 1149a254e568SChristoph Hellwig * @ops: iomap ops passed from the file system 1150a254e568SChristoph Hellwig * 1151a254e568SChristoph Hellwig * This function performs read and write operations to directly mapped 1152a254e568SChristoph Hellwig * persistent memory. 
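 * Data is copied synchronously via dax_copy_to_iter()/dax_copy_from_iter();
 * no page cache pages or bios are involved.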
 * The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
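
/*
 * Example usage (illustrative sketch): a filesystem's ->write_iter() for a
 * DAX inode typically serializes against other writers itself and then hands
 * the iteration off to dax_iomap_rw() with its own iomap_ops.  The names
 * foo_iomap_ops and foo_dax_write_iter below are placeholders, not symbols
 * defined in this file:
 *
 *	static ssize_t foo_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = dax_iomap_rw(iocb, from, &foo_iomap_ops);
 *		inode_unlock(inode);
 *
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */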

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up. If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing a synchronous page fault and the inode needs
		 * fsync, we can insert the PTE into the page tables only after
		 * that fsync has happened. Skip insertion for now and return
		 * the pfn so that the caller can insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(&xas, mapping, &entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

error_finish_iomap:
	ret = dax_fault_return(error);
finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (another
		 * thread may already be happily using the PTE we have
		 * installed). Just ignore the error from ->iomap_end since we
		 * cannot do much with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file. This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (xas.xa_index >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD. If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
	if (xa_is_internal(entry)) {
		result = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up. If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here. We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						DAX_PMD, write && !sync);

		/*
		 * If we are doing a synchronous page fault and the inode needs
		 * fsync, we can insert the PMD into the page tables only after
		 * that fsync has happened. Skip insertion for now and return
		 * the pfn so that the caller can insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (another
		 * thread may already be happily using the PMD we have
		 * installed). Just ignore the error from ->iomap_end since we
		 * cannot do much with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
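
/*
 * Example usage (illustrative sketch): a filesystem's ->fault and
 * ->huge_fault handlers usually wrap dax_iomap_fault(), bracketing write
 * faults with sb_start_pagefault()/sb_end_pagefault().  The names
 * foo_iomap_ops and foo_dax_huge_fault are placeholders, not symbols defined
 * in this file.  A MAP_SYNC-aware handler also checks VM_FAULT_NEEDDSYNC;
 * see the sketch after dax_finish_sync_fault() below:
 *
 *	static vm_fault_t foo_dax_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		bool write = vmf->flags & FAULT_FLAG_WRITE;
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		if (write) {
 *			sb_start_pagefault(inode->i_sb);
 *			file_update_time(vmf->vma->vm_file);
 *		}
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
 *		if (write)
 *			sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */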
/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmapped DAX file. It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	/* Did we race with someone splitting the entry or so? */
	if (!entry ||
	    (order == 0 && !dax_is_pte_entry(entry)) ||
	    (order == PMD_ORDER && (xa_is_internal(entry) ||
				    !dax_is_pmd_entry(entry)))) {
		put_unlocked_entry(&xas, entry);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
				pfn, true);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles insertion of the appropriate
 * page table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
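
/*
 * Example usage (illustrative sketch): a MAP_SYNC-aware fault handler
 * completes a synchronous fault by flushing metadata and then inserting the
 * PFN that dax_iomap_fault() reported.  foo_iomap_ops is a placeholder name:
 *
 *	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 */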