// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <linux/rmap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages. We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking. In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
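/*
 * Illustrative example (not part of any ABI): a locked zero-page PMD
 * entry for pfn 0x1000 would be encoded as
 *
 *	xa_mk_value((0x1000UL << DAX_SHIFT) | DAX_PMD | DAX_ZERO_PAGE |
 *		    DAX_LOCKED)
 *
 * i.e. the pfn always sits above the four flag bits defined below.
 */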
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD. This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}
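/*
 * Worked example for the keying above: with 2MiB PMDs (512 4k pages),
 * PG_PMD_COLOUR is 0x1ff, so a waiter on a PMD entry at index 0x203
 * registers with entry_start == 0x200. All indices 0x200..0x3ff within
 * that PMD therefore hash to the same wait queue bucket and wake each
 * other.
 */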
/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it. The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did. The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}
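/*
 * Usage sketch for the helpers above (__dax_invalidate_entry() below
 * follows this shape):
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas, 0);
 *	... inspect entry ...
 *	put_unlocked_entry(&xas, entry, WAKE_NEXT);
 *	xas_unlock_irq(&xas);
 *
 * Callers that modify the entry instead take it with dax_lock_entry(),
 * drop the xa_lock, and later release it with dax_unlock_entry().
 */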
/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

static inline bool dax_mapping_is_cow(struct address_space *mapping)
{
	return (unsigned long)mapping == PAGE_MAPPING_DAX_COW;
}

/*
 * Set the page->mapping with FS_DAX_MAPPING_COW flag, increase the refcount.
 */
static inline void dax_mapping_set_cow(struct page *page)
{
	if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) {
		/*
		 * Reset the index if the page was already mapped
		 * regularly before.
		 */
		if (page->mapping)
			page->index = 1;
		page->mapping = (void *)PAGE_MAPPING_DAX_COW;
	}
	page->index++;
}

/*
 * When it is called in dax_insert_entry(), the cow flag will indicate
 * whether this entry is shared by multiple files. If so, set the
 * page->mapping FS_DAX_MAPPING_COW, and use page->index as refcount.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address, bool cow)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (cow) {
			dax_mapping_set_cow(page);
		} else {
			WARN_ON_ONCE(page->mapping);
			page->mapping = mapping;
			page->index = index + i++;
		}
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		if (dax_mapping_is_cow(page->mapping)) {
			/* keep the CoW flag if this page is still shared */
			if (page->index-- > 0)
				continue;
		} else
			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}
/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
 * @mapping: the file's mapping whose entry we want to lock
 * @index: the offset within this file
 * @page: output the dax page corresponding to this dax entry
 *
 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
 * could not be locked.
 */
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
		struct page **page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	rcu_read_lock();
	for (;;) {
		entry = NULL;
		if (!dax_mapping(mapping))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		xas_set(&xas, index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		if (!entry ||
		    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
			/*
			 * Because we are looking up the entry via the file's
			 * mapping and index, it may not have been inserted
			 * yet, or it may be a zero/empty entry. We don't
			 * treat this as an error, so return a special value
			 * and do not output @page.
			 */
			entry = (void *)~0UL;
		} else {
			*page = pfn_to_page(dax_to_pfn(entry));
			dax_lock_entry(&xas, entry);
		}
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
		dax_entry_t cookie)
{
	XA_STATE(xas, &mapping->i_pages, index);

	if (cookie == ~0UL)
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
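/*
 * Example caller pattern (sketch; the memory-failure path uses these
 * helpers in roughly this shape):
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *	if (!cookie)
 *		return -EBUSY;		(entry could not be locked)
 *	... the entry stays locked against faults/invalidation here ...
 *	dax_unlock_page(page, cookie);
 */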
/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries. This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them. We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR. Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	pmd_downgrade = false;
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}
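/*
 * Callers decode the error encoding like so (sketch; this is what
 * dax_iomap_pte_fault() below actually does):
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);
 *	if (xa_is_internal(entry))
 *		return xa_to_internal(entry);	i.e. a VM_FAULT_* code
 */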
/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start to the end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path. The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
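/*
 * Filesystems typically loop on this until the layout is quiescent, e.g.
 * (sketch of the xfs_break_dax_layouts() idea; details vary by fs):
 *
 *	while ((page = dax_layout_busy_page(inode->i_mapping))) {
 *		... drop locks, wait for the page's refcount to drop to 1,
 *		    then re-take locks and retry ...
 *	}
 */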
static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping. Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
}

static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
	void *vto, *kaddr;
	long rc;
	int id;

	id = dax_read_lock();
	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
				&kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(vmf->cow_page);
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs. If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
				false);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry. If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone. This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}
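/*
 * Worked example for the index alignment done in dax_writeback_one()
 * below: for a dirty PMD entry (count == 512) found at xa_index 0x203,
 * the flush covers indices 0x200..0x3ff, i.e. the whole PMD, so partial
 * PMD writebacks never happen.
 */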
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count, end;
	long ret = 0;
	struct vm_area_struct *vma;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);
	end = index + count - 1;

	/* Walk all mappings of a given index of a file and writeprotect them */
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
		pfn_mkclean_range(pfn, count, index, vma);
		cond_resched();
	}
	i_mmap_unlock_read(mapping);

	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
		size_t size, void **kaddr, pfn_t *pfnp)
{
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	int id, rc = 0;
	long length;

	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   DAX_ACCESS, kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	if (!pfnp)
		goto out_check_addr;
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;

out_check_addr:
	if (!kaddr)
		goto out;
	if (!*kaddr)
		rc = -EFAULT;
out:
	dax_read_unlock(id);
	return rc;
}
/*
 * The user has performed a load from a hole in the file. Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}
#else
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap *iomap, void **entry)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
		unsigned int offset, size_t size)
{
	void *kaddr;
	long ret;

	ret = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL);
	if (ret > 0) {
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
	}
	return ret;
}
static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	u64 length = iomap_length(iter);
	s64 written = 0;

	/* already zeroed? we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned offset = offset_in_page(pos);
		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		long rc;
		int id;

		id = dax_read_lock();
		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
		else
			rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
		dax_read_unlock(id);

		if (rc < 0)
			return rc;
		pos += size;
		length -= size;
		written += size;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_DAX | IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = dax_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_zero_range);

int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
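/*
 * Example: zeroing the tail of the last block at truncate time (sketch;
 * foo_iomap_ops stands in for the filesystem's own iomap_ops):
 *
 *	error = dax_truncate_page(inode, newsize, &did_zero, &foo_iomap_ops);
 *
 * With a 4k block size and newsize == 0x1200, this zeroes bytes
 * 0x1200..0x1fff of the block containing the new EOF.
 */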
static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
		struct iov_iter *iter)
{
	const struct iomap *iomap = &iomi->iomap;
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	struct dax_device *dax_dev = iomap->dax_dev;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(iomi->inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(iomi->inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		ssize_t map_len;
		bool recovery = false;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				DAX_ACCESS, &kaddr, NULL);
		if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
			map_len = dax_direct_access(dax_dev, pgoff,
					PHYS_PFN(size), DAX_RECOVERY_WRITE,
					&kaddr, NULL);
			if (map_len > 0)
				recovery = true;
		}
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (recovery)
			xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
					map_len, iter);
		else if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @iter: The addresses to do I/O from or to
 * @ops: iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory. The callers need to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct iomap_iter iomi = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DAX,
	};
	loff_t done = 0;
	int ret;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&iomi.inode->i_rwsem);
		iomi.flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&iomi.inode->i_rwsem);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iomi, ops)) > 0)
		iomi.processed = dax_iomap_iter(&iomi, iter);

	done = iomi.pos - iocb->ki_pos;
	iocb->ki_pos = iomi.pos;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
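/*
 * Example caller (sketch, modelled on the ext4/xfs DAX read paths;
 * the foo_* names are placeholders):
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */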
static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, const struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

/*
 * When handling a synchronous page fault and the inode needs a fsync, we can
 * insert the PTE/PMD into page tables only after that fsync happened. Skip
 * insertion for now and return the pfn so that caller can insert it after the
 * fsync is done.
 */
static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
{
	if (WARN_ON_ONCE(!pfnp))
		return VM_FAULT_SIGBUS;
	*pfnp = pfn;
	return VM_FAULT_NEEDDSYNC;
}

static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
		const struct iomap_iter *iter)
{
	vm_fault_t ret;
	int error = 0;

	switch (iter->iomap.type) {
	case IOMAP_HOLE:
	case IOMAP_UNWRITTEN:
		clear_user_highpage(vmf->cow_page, vmf->address);
		break;
	case IOMAP_MAPPED:
		error = copy_cow_page_dax(vmf, iter);
		break;
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

	if (error)
		return dax_fault_return(error);

	__SetPageUptodate(vmf->cow_page);
	ret = finish_fault(vmf);
	if (!ret)
		return VM_FAULT_DONE_COW;
	return ret;
}
/**
 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
 * @vmf: vm fault instance
 * @iter: iomap iter
 * @pfnp: pfn to be returned
 * @xas: the dax mapping tree of a file
 * @entry: an unlocked dax entry to be inserted
 * @pmd: distinguish whether it is a pmd fault
 */
static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
		const struct iomap_iter *iter, pfn_t *pfnp,
		struct xa_state *xas, void **entry, bool pmd)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const struct iomap *iomap = &iter->iomap;
	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap);
	unsigned long entry_flags = pmd ? DAX_PMD : 0;
	int err = 0;
	pfn_t pfn;

	if (!pmd && vmf->cow_page)
		return dax_fault_cow_page(vmf, iter);

	/* if we are reading UNWRITTEN and HOLE, return a hole. */
	if (!write &&
	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
		if (!pmd)
			return dax_load_hole(xas, mapping, entry, vmf);
		return dax_pmd_load_hole(xas, vmf, iomap, entry);
	}

	if (iomap->type != IOMAP_MAPPED) {
		WARN_ON_ONCE(1);
		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
	}

	err = dax_iomap_direct_access(&iter->iomap, pos, size, NULL, &pfn);
	if (err)
		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
				  write && !sync);

	if (sync)
		return dax_fault_synchronous_pfnp(pfnp, pfn);

	/* insert PMD pfn */
	if (pmd)
		return vmf_insert_pfn_pmd(vmf, pfn, write);

	/* insert PTE pfn */
	if (write)
		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
		.len		= PAGE_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = 0;
	void *entry;
	int error;

	trace_dax_pte_fault(iter.inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (iter.pos >= i_size_read(iter.inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		iter.flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up. If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
			iter.processed = -EIO;	/* fs corruption? */
			continue;
		}

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
		if (ret != VM_FAULT_SIGBUS &&
		    (iter.iomap.flags & IOMAP_F_NEW)) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			ret |= VM_FAULT_MAJOR;
		}

		if (!(ret & VM_FAULT_ERROR))
			iter.processed = PAGE_SIZE;
	}

	if (iomap_errp)
		*iomap_errp = error;
	if (!ret && error)
		ret = dax_fault_return(error);

unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(iter.inode, vmf, ret);
	return ret;
}
#ifdef CONFIG_FS_DAX_PMD
static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
		pgoff_t max_pgoff)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file. This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		return true;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vmf->vma->vm_flags & VM_SHARED))
		return true;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vmf->vma->vm_start)
		return true;
	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return true;

	/* If the PMD would extend beyond the file size */
	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
		return true;

	return false;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.len		= PMD_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = VM_FAULT_FALLBACK;
	pgoff_t max_pgoff;
	void *entry;
	int error;

	if (vmf->flags & FAULT_FLAG_WRITE)
		iter.flags |= IOMAP_WRITE;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);

	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);

	if (xas.xa_index >= max_pgoff) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD. If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up. If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		ret = 0;
		goto unlock_entry;
	}

	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (iomap_length(&iter) < PMD_SIZE)
			continue; /* actually breaks out of the loop */

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
		if (ret != VM_FAULT_FALLBACK)
			iter.processed = PMD_SIZE;
	}

unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (ret == VM_FAULT_FALLBACK) {
		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
	return ret;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */
/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmapped DAX file. It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, order);
	/* Did we race with someone splitting entry or so? */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}
/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of the appropriate
 * page table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
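/*
 * Example synchronous-fault flow in a filesystem's huge_fault handler
 * (sketch; foo_iomap_ops is a placeholder for the fs's iomap_ops):
 *
 *	pfn_t pfn;
 *	vm_fault_t ret;
 *
 *	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *	return ret;
 */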