/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages. We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking. In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
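
/*
 * Worked example of the encoding above (illustrative, not taken from the
 * original source): for a locked, PTE-sized entry covering pfn 0x1234,
 * xa_to_value() yields (0x1234 << DAX_SHIFT) | DAX_LOCKED, i.e. the pfn
 * lives in the high bits and the four special bits sit below it.
 * dax_make_entry() and dax_to_pfn() below implement this packing and
 * unpacking.
 */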
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD. This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it. The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
				!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
	/* If we were the only waiter woken, wake the next one */
	if (entry)
		dax_wake_entry(xas, entry, false);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, false);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
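
/*
 * Associate every page backing @entry with @mapping, giving each page the
 * file index that corresponds to its position within the mapped range.
 * dax_disassociate_entry() undoes this association.
 */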
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: %true if the entry was locked or does not need to be locked.
 */
bool dax_lock_mapping_entry(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;
	bool locked;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		locked = false;
		if (!dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		locked = true;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			entry = get_unlocked_entry(&xas);
			xas_unlock_irq(&xas);
			put_unlocked_entry(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return locked;
}

void dax_unlock_mapping_entry(struct page *page)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);
	void *entry;

	if (S_ISCHR(mapping->host->i_mode))
		return;

	rcu_read_lock();
	entry = xas_load(&xas);
	rcu_read_unlock();
	entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
	dax_unlock_entry(&xas, entry);
}

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries. This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them. We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR. Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned long size_flag)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas);

	if (entry) {
		if (!xa_is_value(entry)) {
			xas_set_err(xas, EIO);
			goto out_unlock;
		}

		if (size_flag & DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_entry(xas, entry);
				goto fallback;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, true);
		mapping->nrexceptional--;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrexceptional++;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}
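
/*
 * Illustrative call pattern for grab_mapping_entry() (not part of the
 * original source): the PTE fault path passes 0 for the size flag and the
 * PMD fault path passes DAX_PMD; both check the result with
 * xa_is_internal() to detect an encoded VM_FAULT code before using the
 * returned entry.
 */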

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path. The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
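
/*
 * Remove the DAX entry at @index from @mapping if possible. When @trunc is
 * false the entry is only removed if it is neither dirty nor marked for
 * writeback. Returns 1 if an entry was removed, 0 otherwise.
 */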
static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_entry(&xas, entry);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping. Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs. If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry. If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone. This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		void *old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}
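
/*
 * Translate a file page offset into the user virtual address at which it is
 * mapped within @vma. The caller must ensure that @pgoff actually falls
 * inside the VMA; the VM_BUG_ON_VMA() below checks this.
 */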
static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}
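
/*
 * Flush a single dirty DAX entry: write-protect every user mapping of the
 * pfn(s) it covers, flush the CPU caches for the whole range, and only then
 * clear the dirty mark. Called with the xa_state locked; the lock is
 * dropped and re-taken around the actual flush.
 */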
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	size = PAGE_SIZE << dax_entry_order(entry);

	dax_entry_mkclean(mapping, xas->xa_index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, false);

	trace_dax_writeback_one(mapping->host, xas->xa_index,
			size >> PAGE_SHIFT);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	struct dax_device *dax_dev;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}
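
/*
 * Look up the pfn backing @pos in @iomap. Returns 0 and fills in @pfnp on
 * success, or a negative errno. The mapping must cover at least @size
 * bytes, be suitably aligned for @size and, for anything larger than a
 * single page, be backed by devmap pages.
 */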
static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file. Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory. The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
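
/*
 * Convert a negative errno from the fault path into the VM_FAULT_* code
 * that is reported back to the MM core.
 */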
static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up. If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX required
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(&xas, mapping, &entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	ret = dax_fault_return(error);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	dax_unlock_entry(&xas, entry);
 out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
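/*
 * Handle a read fault of a hole with a PMD-sized mapping of the huge zero
 * page: record a DAX_PMD | DAX_ZERO_PAGE entry in the page cache and install
 * a huge read-only mapping, falling back to PTEs if the zero page cannot be
 * obtained or the PMD slot is no longer empty.
 */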
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file. This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (xas.xa_index >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.
	 * If it can't (because a PTE entry is already in the array, for
	 * instance), it will return VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
	if (xa_is_internal(entry)) {
		result = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up. If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here. We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	dax_unlock_entry(&xas, entry);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file. It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (order == 0 && !dax_is_pte_entry(entry)) ||
	    (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
		put_unlocked_entry(&xas, entry);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
				pfn, true);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);