/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (i.e. low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * We use the lowest available bit in an exceptional entry for locking, one
 * bit for the entry size (PMD), and two more to tell us if the entry is a
 * zero page or an empty entry that is just used for locking.  In total four
 * special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
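
/*
 * A DAX radix tree entry encodes the pfn of the backing memory in the bits
 * above RADIX_DAX_SHIFT, with the flag bits described above packed below it.
 * For example, a locked empty PMD entry is
 * (RADIX_TREE_EXCEPTIONAL_ENTRY | RADIX_DAX_PMD | RADIX_DAX_EMPTY |
 *  RADIX_DAX_ENTRY_LOCK) with a pfn of zero.  The helpers below pack and
 * unpack these fields.
 */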
static unsigned long dax_radix_pfn(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked.  Must be called with the i_pages
 * lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  Must be called with the i_pages lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree and, if it is an exceptional entry, wait
 * for it to become unlocked before returning it.  The caller must call
 * put_unlocked_mapping_entry() if it decided not to lock the entry, or
 * put_locked_mapping_entry() if it locked the entry and now wants to
 * unlock it.
 *
 * Must be called with the i_pages lock held.
 */
static void *__get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp, bool (*wait_fn)(void))
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		bool revalidate;

		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
					    &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xa_unlock_irq(&mapping->i_pages);
		revalidate = wait_fn();
		finish_wait(wq, &ewait.wait);
		xa_lock_irq(&mapping->i_pages);
		if (revalidate)
			return ERR_PTR(-EAGAIN);
	}
}

static bool entry_wait(void)
{
	schedule();
	/*
	 * Never return an ERR_PTR() from
	 * __get_unlocked_mapping_entry(), just keep looping.
	 */
	return false;
}

static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
}

static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry, **slot;

	xa_lock_irq(&mapping->i_pages);
	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		xa_unlock_irq(&mapping->i_pages);
		return;
	}
	unlock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_radix_end_pfn(void *entry)
{
	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_radix_pfn(entry); \
			pfn < dax_radix_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
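/*
 * Point each struct page backing this entry at the owning mapping and file
 * offset so that a pfn can later be mapped back to its file (e.g. by
 * dax_lock_mapping_entry()).  dax_disassociate_entry() undoes this when the
 * entry is removed or replaced.
 */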
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

static bool entry_wait_revalidate(void)
{
	rcu_read_unlock();
	schedule();
	rcu_read_lock();

	/*
	 * Tell __get_unlocked_mapping_entry() to take a break, we need
	 * to revalidate page->mapping after dropping locks
	 */
	return true;
}

bool dax_lock_mapping_entry(struct page *page)
{
	pgoff_t index;
	struct inode *inode;
	bool did_lock = false;
	void *entry = NULL, **slot;
	struct address_space *mapping;

	rcu_read_lock();
	for (;;) {
		mapping = READ_ONCE(page->mapping);

		if (!dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		inode = mapping->host;
		if (S_ISCHR(inode->i_mode)) {
			did_lock = true;
			break;
		}

		xa_lock_irq(&mapping->i_pages);
		if (mapping != page->mapping) {
			xa_unlock_irq(&mapping->i_pages);
			continue;
		}
		index = page->index;

		entry = __get_unlocked_mapping_entry(mapping, index, &slot,
						     entry_wait_revalidate);
		if (!entry) {
			xa_unlock_irq(&mapping->i_pages);
			break;
		} else if (IS_ERR(entry)) {
			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
			continue;
		}
		lock_slot(mapping, slot);
		did_lock = true;
		xa_unlock_irq(&mapping->i_pages);
		break;
	}
	rcu_read_unlock();

	return did_lock;
}

void dax_unlock_mapping_entry(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	if (S_ISCHR(inode->i_mode))
		return;

	unlock_mapping_entry(mapping, page->index);
}

/*
 * Find radix tree entry at given index. If it points to an exceptional entry,
 * return it with the radix tree entry locked. If the radix tree doesn't
 * contain given index, create an empty exceptional entry for the index and
 * return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages and
 * 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * the i_pages lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		xa_unlock_irq(&mapping->i_pages);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
							PG_PMD_NR, false);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		xa_lock_irq(&mapping->i_pages);

		if (!entry) {
			/*
			 * We needed to drop the i_pages lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->i_pages, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				xa_unlock_irq(&mapping->i_pages);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			dax_disassociate_entry(entry, mapping, false);
			radix_tree_delete(&mapping->i_pages, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->i_pages, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			xa_unlock_irq(&mapping->i_pages);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		xa_unlock_irq(&mapping->i_pages);
		return entry;
	}
	entry = lock_slot(mapping, slot);
out_unlock:
	xa_unlock_irq(&mapping->i_pages);
	return entry;
}

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct page *page = NULL;
	struct pagevec pvec;
	pgoff_t index, end;
	unsigned i;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	pagevec_init(&pvec);
	index = 0;
	end = -1;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the pagevec_lookup and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *pvec_ent = pvec.pages[i];
			void *entry;

			index = indices[i];
			if (index >= end)
				break;

			if (WARN_ON_ONCE(
			     !radix_tree_exceptional_entry(pvec_ent)))
				continue;

			xa_lock_irq(&mapping->i_pages);
			entry = get_unlocked_mapping_entry(mapping, index, NULL);
			if (entry)
				page = dax_busy_page(entry);
			put_unlocked_mapping_entry(mapping, index, entry);
			xa_unlock_irq(&mapping->i_pages);
			if (page)
				break;
		}

		/*
		 * We don't expect normal struct page entries to exist in our
		 * tree, but we keep these pagevec calls so that this code is
		 * consistent with the common pattern for handling pagevecs
		 * throughout the kernel.
		 */
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;

		if (page)
			break;
	}
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *pages = &mapping->i_pages;

	xa_lock_irq(pages);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	radix_tree_delete(pages, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	xa_unlock_irq(pages);
	return ret;
}

/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}
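
/*
 * Copy one page of data from the DAX device backing @sector into the page
 * cache page @to.  Used to populate vmf->cow_page for write faults on
 * MAP_PRIVATE mappings.
 */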
static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, pfn_t pfn_t,
				      unsigned long flags, bool dirty)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	unsigned long pfn = pfn_t_to_pfn(pfn_t);
	pgoff_t index = vmf->pgoff;
	void *new_entry;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
	}

	xa_lock_irq(pages);
	new_entry = dax_radix_locked_entry(pfn, flags);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(pages, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(pages, node, slot,
				     new_entry, NULL);
		entry = new_entry;
	}

	if (dirty)
		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);

	xa_unlock_irq(pages);
	return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct dax_device *dax_dev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	void *entry2, **slot;
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	xa_lock_irq(pages);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare pfns as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
	xa_unlock_irq(pages);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_radix_pfn(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	dax_mapping_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xa_lock_irq(pages);
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(pages);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
	put_locked_mapping_entry(mapping, index);
	return ret;

put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	xa_unlock_irq(pages);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(dax_dev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	pfn_t pfn;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	pfn = page_to_pfn_t(zero_page);
	dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
			false);
	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}
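
/*
 * Handle a single-page (PTE sized) fault on a DAX file: look up the block
 * via ->iomap_begin(), install a locked radix tree entry for the pfn and map
 * it into the page tables, or hand the pfn back via @pfnp for synchronous
 * faults that must wait for fsync before the PTE can be installed.
 */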
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the filesystem block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

error_finish_iomap:
	ret = dax_fault_return(error);
finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						RADIX_DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for the page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

/**
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the corresponding radix tree entry
 * as dirty.
 */
static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
				  enum page_entry_size pe_size,
				  pfn_t pfn)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;
	vm_fault_t ret;

	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
		put_unlocked_mapping_entry(mapping, index, entry);
		xa_unlock_irq(&mapping->i_pages);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	switch (pe_size) {
	case PE_SIZE_PTE:
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
		break;
#ifdef CONFIG_FS_DAX_PMD
	case PE_SIZE_PMD:
		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, true);
		break;
#endif
	default:
		ret = VM_FAULT_FALLBACK;
	}
	put_locked_mapping_entry(mapping, index);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	size_t len = 0;

	if (pe_size == PE_SIZE_PTE)
		len = PAGE_SIZE;
	else if (pe_size == PE_SIZE_PMD)
		len = PMD_SIZE;
	else
		WARN_ON_ONCE(1);
	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);