// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)
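
/*
 * Illustrative sketch (not a code path in this file): how a pfn and the
 * flag bits above pack into a single xarray value, using the helpers
 * defined below.  The pfn 0x1234 here is hypothetical:
 *
 *	void *entry = dax_make_entry(pfn_to_pfn_t(0x1234),
 *				     DAX_PMD | DAX_LOCKED);
 *
 *	dax_to_pfn(entry)	 == 0x1234, recovered by shifting away
 *				    the four flag bits (DAX_SHIFT)
 *	dax_is_pmd_entry(entry)	 is non-zero, DAX_PMD is set
 *	dax_is_locked(entry)	 is true, DAX_LOCKED is set
 */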

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
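
/*
 * Worked example of the keying above, assuming 2MiB PMDs and 4k pages
 * (so PG_PMD_COLOUR == 511): page offsets 0x200..0x3ff all lie in one
 * PMD, and for a PMD entry each is masked with ~PG_PMD_COLOUR down to
 * entry_start 0x200.  Waiters and wakers for any offset in that PMD
 * therefore hash to the same wait_table bucket and compare equal in
 * wake_exceptional_entry_func().
 */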

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
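
/*
 * Illustrative sketch of the locking protocol built from the helpers
 * above (this mirrors what dax_insert_pfn_mkwrite() and friends do
 * below; it is not a new code path):
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas, 0);
 *	if (entry && !dax_is_conflict(entry)) {
 *		dax_lock_entry(&xas, entry);	// sets DAX_LOCKED
 *		xas_unlock_irq(&xas);
 *		// ... operate on the entry without the xa_lock ...
 *		dax_unlock_entry(&xas, entry);	// clears lock, wakes next
 *	} else {
 *		put_unlocked_entry(&xas, entry, WAKE_NEXT);
 *		xas_unlock_irq(&xas);
 *	}
 */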

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
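
/*
 * Illustrative sketch of the cookie pattern above, loosely modelled on
 * the memory-failure path (it assumes the caller already holds a
 * reference on @page):
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *
 *	if (!cookie)
 *		return -EBUSY;	// entry vanished under us
 *	// page->mapping and page->index are stable here
 *	...
 *	dax_unlock_page(page, cookie);
 */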

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}
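
/*
 * Illustrative sketch of how callers decode the return value (this is
 * the pattern dax_iomap_pte_fault() uses below; not a new code path):
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);
 *	if (xa_is_internal(entry)) {
 *		// Not an entry: an encoded VM_FAULT code such as
 *		// VM_FAULT_OOM, VM_FAULT_SIGBUS or VM_FAULT_FALLBACK.
 *		return xa_to_internal(entry);
 *	}
 *	// Locked entry; must end with dax_unlock_entry(&xas, entry).
 */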

/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' to the end of the file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start to the end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
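
/*
 * Illustrative sketch of how a filesystem might drain busy pages before
 * truncating (loosely modelled on the XFS/ext4 break-layouts pattern;
 * the waiting mechanism is filesystem-specific and elided here):
 *
 *	struct page *page;
 *
 *	while ((page = dax_layout_busy_page(inode->i_mapping))) {
 *		// Drop locks that fault paths take, then wait for the
 *		// pin (e.g. in-flight DMA) to go away before rechecking.
 *		... wait for page_ref_count(page) == 1 ...
 *	}
 *	// No page in the mapping is pinned; safe to truncate.
 */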

static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
			     sector_t sector, struct page *to, unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * follow_invalidate_pte() will use the range to call
		 * mmu_notifier_invalidate_range_start() on our behalf before
		 * taking any lock.
		 */
		if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
					  &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_invalidate(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count;
	long ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);

	dax_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
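
/*
 * Illustrative sketch of the expected caller: a filesystem's
 * ->writepages for a DAX inode (loosely what ext4/xfs do; how the
 * dax_device is looked up is filesystem-specific and hypothetical
 * here):
 *
 *	static int fs_dax_writepages(struct address_space *mapping,
 *				     struct writeback_control *wbc)
 *	{
 *		struct dax_device *dax_dev = ...;	// fs-specific lookup
 *
 *		return dax_writeback_mapping_range(mapping, dax_dev, wbc);
 *	}
 */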

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
{
	sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
	pgoff_t pgoff;
	long rc, id;
	void *kaddr;
	bool page_aligned = false;
	unsigned offset = offset_in_page(pos);
	unsigned size = min_t(u64, PAGE_SIZE - offset, length);

	if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
	    (size == PAGE_SIZE))
		page_aligned = true;

	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();

	if (page_aligned)
		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
	else
		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}

	if (!page_aligned) {
		memset(kaddr + offset, 0, size);
		dax_flush(iomap->dax_dev, kaddr + offset, size);
	}
	dax_read_unlock(id);
	return size;
}

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate a block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		flags |= IOMAP_NOWAIT;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
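
/*
 * Illustrative sketch of a filesystem read path built on dax_iomap_rw()
 * (loosely the shape of the ext4/xfs DAX read_iter paths; fs_iomap_ops
 * is a hypothetical iomap_ops instance):
 *
 *	static ssize_t fs_dax_read_iter(struct kiocb *iocb,
 *					struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);	// read/write exclusion
 *		ret = dax_iomap_rw(iocb, to, &fs_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */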

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { .type = IOMAP_HOLE };
	struct iomap srcmap = { .type = IOMAP_HOLE };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
						  sector, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(&xas, mapping, &entry, vmf);
			goto finish_iomap;
		}
		fallthrough;
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

error_finish_iomap:
	ret = dax_fault_return(error);
finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { .type = IOMAP_HOLE };
	struct iomap srcmap = { .type = IOMAP_HOLE };
	pgoff_t max_pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (xas.xa_index >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
	if (xa_is_internal(entry)) {
		result = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
			&srcmap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vmf, pfn, write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
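
/*
 * Illustrative sketch of wiring dax_iomap_fault() into a filesystem's
 * vm_operations_struct (loosely the shape used by ext4/xfs; the fs
 * would also take its own fault locks here.  fs_iomap_ops and the
 * handler names are hypothetical):
 *
 *	static vm_fault_t fs_dax_fault(struct vm_fault *vmf)
 *	{
 *		pfn_t pfn;
 *
 *		return dax_iomap_fault(vmf, PE_SIZE_PTE, &pfn, NULL,
 *				       &fs_iomap_ops);
 *	}
 *
 *	static const struct vm_operations_struct fs_dax_vm_ops = {
 *		.fault		= fs_dax_fault,
 *		.page_mkwrite	= fs_dax_fault,
 *		.pfn_mkwrite	= fs_dax_fault,
 *	};
 */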

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, order);
	/* Did we race with someone splitting entry or so? */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
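
/*
 * Illustrative sketch of the synchronous (MAP_SYNC) fault flow from a
 * filesystem's point of view (loosely the ext4/xfs pattern;
 * fs_iomap_ops is a hypothetical iomap_ops instance):
 *
 *	static vm_fault_t fs_dax_huge_fault(struct vm_fault *vmf,
 *					    enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				      &fs_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			// MAP_SYNC write fault: metadata must be made
 *			// durable before the PTE/PMD is installed.
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 */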