// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <linux/rmap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)
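
/*
 * Illustrative sketch of the encoding above (not used by the code below,
 * values chosen purely as an example): a locked, PMD-sized entry for pfn
 * 0x1234 is stored as the XArray value
 *
 *	xa_mk_value((0x1234UL << DAX_SHIFT) | DAX_PMD | DAX_LOCKED)
 *
 * and dax_to_pfn() recovers the pfn by shifting the four flag bits back
 * out:  xa_to_value(entry) >> DAX_SHIFT == 0x1234.
 */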

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}
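
/*
 * Worked example of the waitqueue selection above (illustrative only,
 * assuming 4K pages and 2M PMDs so PG_PMD_COLOUR == 511): a waiter whose
 * xa_state sits at xa_index 0x203 but finds a PMD entry hashes with
 * index 0x203 & ~511 == 0x200, so every page offset inside that PMD
 * (0x200..0x3ff) maps to the same waitqueue and a single dax_wake_entry()
 * call covers all of them.
 */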

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages).
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies.  Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
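
/*
 * Illustrative sketch of how the helpers above are meant to be combined
 * (this mirrors what grab_mapping_entry() and dax_writeback_one() do below;
 * it is not an additional API):
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas, order);
 *	...				// inspect the unlocked entry
 *	dax_lock_entry(&xas, entry);	// take ownership of the entry
 *	xas_unlock_irq(&xas);
 *	...				// sleepable work on the entry
 *	dax_unlock_entry(&xas, entry);	// store it back and wake a waiter
 *
 * A caller that decides not to lock the entry must instead drop it with
 * put_unlocked_entry() so that any waiters are woken up.
 */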

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

static inline bool dax_page_is_shared(struct page *page)
{
	return page->mapping == PAGE_MAPPING_DAX_SHARED;
}

/*
 * Set page->mapping to PAGE_MAPPING_DAX_SHARED and increase the refcount.
 */
static inline void dax_page_share_get(struct page *page)
{
	if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
		/*
		 * Reset the index if the page was already mapped
		 * regularly before.
		 */
		if (page->mapping)
			page->share = 1;
		page->mapping = PAGE_MAPPING_DAX_SHARED;
	}
	page->share++;
}

static inline unsigned long dax_page_share_put(struct page *page)
{
	return --page->share;
}

/*
 * When it is called in dax_insert_entry(), the shared flag will indicate
 * whether this entry is shared by multiple files.  If so, set page->mapping
 * to PAGE_MAPPING_DAX_SHARED and use page->share as a refcount.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address, bool shared)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (shared) {
			dax_page_share_get(page);
		} else {
			WARN_ON_ONCE(page->mapping);
			page->mapping = mapping;
			page->index = index + i++;
		}
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		if (dax_page_is_shared(page)) {
			/* keep the shared flag if this page is still shared */
			if (dax_page_share_put(page) > 0)
				continue;
		} else
			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
 * @mapping: the file's mapping whose entry we want to lock
 * @index: the offset within this file
 * @page: output the dax page corresponding to this dax entry
 *
 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
 * could not be locked.
 */
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
		struct page **page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	rcu_read_lock();
	for (;;) {
		entry = NULL;
		if (!dax_mapping(mapping))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		xas_set(&xas, index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		if (!entry ||
		    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
			/*
			 * Because we are looking up the entry by the file's
			 * mapping and index, there may be no entry inserted
			 * yet, or it may only be a zero or empty entry.  We
			 * don't treat this as an error, so return a special
			 * value and do not output @page.
			 */
			entry = (void *)~0UL;
		} else {
			*page = pfn_to_page(dax_to_pfn(entry));
			dax_lock_entry(&xas, entry);
		}
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
		dax_entry_t cookie)
{
	XA_STATE(xas, &mapping->i_pages, index);

	if (cookie == ~0UL)
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
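
/*
 * Illustrative caller sketch for the pair above (this is roughly how the
 * memory-failure path uses it; the error value in the body is only for the
 * example):
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *	if (!cookie)
 *		return -EBUSY;		// entry could not be locked
 *	...				// page->mapping/index are now stable
 *	dax_unlock_page(page, cookie);
 *
 * dax_lock_mapping_entry()/dax_unlock_mapping_entry() follow the same
 * pattern, keyed by (mapping, index) instead of a page.
 */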

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	pmd_downgrade = false;
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}
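
/*
 * Illustrative use of the return convention above (this is the pattern the
 * fault handlers below follow; it is not an extra helper): a VM_FAULT code
 * comes back as an xarray internal entry, everything else is a locked entry.
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);
 *	if (xa_is_internal(entry))
 *		return xa_to_internal(entry);	// e.g. VM_FAULT_FALLBACK
 *	...
 *	dax_unlock_entry(&xas, entry);
 */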

/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start till end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
}
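
/*
 * Worked example for the translation above (numbers are illustrative): with
 * an extent mapped at iomap->offset == 0x200000 (file offset) and
 * iomap->addr == 0x10000000 (device offset), a position pos == 0x203000
 * yields 0x10000000 + 0x203000 - 0x200000 == 0x10003000, i.e. pgoff
 * PHYS_PFN(0x10003000) == 0x10003 within the dax device.
 */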

static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
	void *vto, *kaddr;
	long rc;
	int id;

	id = dax_read_lock();
	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
				&kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(vmf->cow_page);
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
		struct vm_area_struct *vma)
{
	return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
		(iter->iomap.flags & IOMAP_F_DIRTY);
}

static bool dax_fault_is_cow(const struct iomap_iter *iter)
{
	return (iter->flags & IOMAP_WRITE) &&
		(iter->iomap.flags & IOMAP_F_SHARED);
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void *entry, pfn_t pfn,
		unsigned long flags)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *new_entry = dax_make_entry(pfn, flags);
	bool dirty = !dax_fault_is_synchronous(iter, vmf->vma);
	bool cow = dax_fault_is_cow(iter);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (cow || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (cow || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
				cow);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	if (cow)
		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);

	xas_unlock_irq(xas);
	return entry;
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count, end;
	long ret = 0;
	struct vm_area_struct *vma;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);
	end = index + count - 1;

	/* Walk all mappings of a given index of a file and writeprotect them */
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
		pfn_mkclean_range(pfn, count, index, vma);
		cond_resched();
	}
	i_mmap_unlock_read(mapping);

	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}
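
/*
 * Worked example of the alignment above (illustrative, assuming 4K pages):
 * for a PMD entry (order 9, count == 512) found while the xa_state sits at
 * xa_index 0x20203, the flush uses index 0x20203 & ~511 == 0x20200 and
 * end == 0x203ff, i.e. the whole 2M range backing the entry is
 * write-protected and flushed in one go.
 */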

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
		size_t size, void **kaddr, pfn_t *pfnp)
{
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	int id, rc = 0;
	long length;

	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   DAX_ACCESS, kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	if (!pfnp)
		goto out_check_addr;
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;

out_check_addr:
	if (!kaddr)
		goto out;
	if (!*kaddr)
		rc = -EFAULT;
out:
	dax_read_unlock(id);
	return rc;
}

/**
 * dax_iomap_cow_copy - Copy the data from source to destination before write
 * @pos: address to do copy from.
 * @length: size of copy operation.
 * @align_size: aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE)
 * @srcmap: iomap srcmap
 * @daddr: destination address to copy to.
 *
 * This can be called from two places. Either during DAX write fault (page
 * aligned), to copy the length size data to daddr. Or, while doing normal DAX
 * write operation, dax_iomap_iter() might call this to do the copy of either
 * start or end unaligned address. In the latter case the rest of the copy of
 * aligned ranges is taken care of by dax_iomap_iter() itself.
 */
static int dax_iomap_cow_copy(loff_t pos, uint64_t length, size_t align_size,
		const struct iomap *srcmap, void *daddr)
{
	loff_t head_off = pos & (align_size - 1);
	size_t size = ALIGN(head_off + length, align_size);
	loff_t end = pos + length;
	loff_t pg_end = round_up(end, align_size);
	bool copy_all = head_off == 0 && end == pg_end;
	void *saddr = 0;
	int ret = 0;

	ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
	if (ret)
		return ret;

	if (copy_all) {
		ret = copy_mc_to_kernel(daddr, saddr, length);
		return ret ? -EIO : 0;
	}

	/* Copy the head part of the range */
	if (head_off) {
		ret = copy_mc_to_kernel(daddr, saddr, head_off);
		if (ret)
			return -EIO;
	}

	/* Copy the tail part of the range */
	if (end < pg_end) {
		loff_t tail_off = head_off + length;
		loff_t tail_len = pg_end - end;

		ret = copy_mc_to_kernel(daddr + tail_off, saddr + tail_off,
					tail_len);
		if (ret)
			return -EIO;
	}
	return 0;
}
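
/*
 * Worked example for the unaligned-write case above (illustrative numbers,
 * align_size == PAGE_SIZE == 4096): for a write of length 100 at pos 4000,
 * head_off == 4000, end == 4100 and pg_end == 8192.  The head copy brings
 * over bytes [0, 4000) of the aligned range from @srcmap, the tail copy
 * brings over bytes [4100, 8192), and the 100 bytes in between are left for
 * the caller to fill with the new data.
 */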

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	struct inode *inode = iter->inode;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
				  DAX_PMD | DAX_ZERO_PAGE);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}
#else
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	unsigned offset = offset_in_page(pos);
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	void *kaddr;
	long ret;

	ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
				NULL);
	if (ret < 0)
		return ret;
	memset(kaddr + offset, 0, size);
	if (srcmap->addr != iomap->addr) {
		ret = dax_iomap_cow_copy(pos, size, PAGE_SIZE, srcmap,
					 kaddr);
		if (ret < 0)
			return ret;
		dax_flush(iomap->dax_dev, kaddr, PAGE_SIZE);
	} else
		dax_flush(iomap->dax_dev, kaddr + offset, size);
	return ret;
}

static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	u64 length = iomap_length(iter);
	s64 written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	/*
	 * invalidate the pages whose sharing state is to be changed
	 * because of CoW.
	 */
	if (iomap->flags & IOMAP_F_SHARED)
		invalidate_inode_pages2_range(iter->inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (pos + length - 1) >> PAGE_SHIFT);

	do {
		unsigned offset = offset_in_page(pos);
		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		long rc;
		int id;

		id = dax_read_lock();
		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
		else
			rc = dax_memzero(iter, pos, size);
		dax_read_unlock(id);

		if (rc < 0)
			return rc;
		pos += size;
		length -= size;
		written += size;
	} while (length > 0);

	if (did_zero)
		*did_zero = true;
	return written;
}

int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_DAX | IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = dax_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_zero_range);

int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
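
/*
 * Worked example for dax_truncate_page() above (illustrative, 4096-byte
 * blocks): truncating a file down to pos == 0x1803 leaves off == 0x803, so
 * the tail of the now-partial block, blocksize - off == 0x7fd bytes starting
 * at pos, is zeroed via dax_zero_range(); a truncate to a block boundary
 * (off == 0) is a no-op here.
 */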

static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
		struct iov_iter *iter)
{
	const struct iomap *iomap = &iomi->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iomi);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	struct dax_device *dax_dev = iomap->dax_dev;
	loff_t end = pos + length, done = 0;
	bool write = iov_iter_rw(iter) == WRITE;
	bool cow = write && iomap->flags & IOMAP_F_SHARED;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (!write) {
		end = min(end, i_size_read(iomi->inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	/*
	 * In DAX mode, enforce either pure overwrites of written extents, or
	 * writes to unwritten extents as part of a copy-on-write operation.
	 */
	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
			!(iomap->flags & IOMAP_F_SHARED)))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW || cow) {
		invalidate_inode_pages2_range(iomi->inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		ssize_t map_len;
		bool recovery = false;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				DAX_ACCESS, &kaddr, NULL);
		if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
			map_len = dax_direct_access(dax_dev, pgoff,
					PHYS_PFN(size), DAX_RECOVERY_WRITE,
					&kaddr, NULL);
			if (map_len > 0)
				recovery = true;
		}
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		if (cow) {
			ret = dax_iomap_cow_copy(pos, length, PAGE_SIZE, srcmap,
						 kaddr);
			if (ret)
				break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (recovery)
			xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
					map_len, iter);
		else if (write)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @iter: The addresses to do I/O from or to
 * @ops: iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct iomap_iter iomi = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DAX,
	};
	loff_t done = 0;
	int ret;

	if (!iomi.len)
		return 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&iomi.inode->i_rwsem);
		iomi.flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&iomi.inode->i_rwsem);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iomi, ops)) > 0)
		iomi.processed = dax_iomap_iter(&iomi, iter);

	done = iomi.pos - iocb->ki_pos;
	iocb->ki_pos = iomi.pos;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
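
/*
 * Illustrative caller sketch (roughly how a filesystem ->read_iter wires
 * this up; the function name and "example_iomap_ops" are placeholders, not
 * part of this file):
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *					     struct iov_iter *to)
 *	{
 *		ssize_t ret;
 *
 *		inode_lock_shared(file_inode(iocb->ki_filp));
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(file_inode(iocb->ki_filp));
 *		return ret;
 *	}
 */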

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}

/*
 * When handling a synchronous page fault and the inode needs a fsync, we can
 * insert the PTE/PMD into page tables only after that fsync happened. Skip
 * insertion for now and return the pfn so that caller can insert it after the
 * fsync is done.
 */
static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
{
	if (WARN_ON_ONCE(!pfnp))
		return VM_FAULT_SIGBUS;
	*pfnp = pfn;
	return VM_FAULT_NEEDDSYNC;
}

static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
		const struct iomap_iter *iter)
{
	vm_fault_t ret;
	int error = 0;

	switch (iter->iomap.type) {
	case IOMAP_HOLE:
	case IOMAP_UNWRITTEN:
		clear_user_highpage(vmf->cow_page, vmf->address);
		break;
	case IOMAP_MAPPED:
		error = copy_cow_page_dax(vmf, iter);
		break;
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

	if (error)
		return dax_fault_return(error);

	__SetPageUptodate(vmf->cow_page);
	ret = finish_fault(vmf);
	if (!ret)
		return VM_FAULT_DONE_COW;
	return ret;
}

/**
 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
 * @vmf: vm fault instance
 * @iter: iomap iter
 * @pfnp: pfn to be returned
 * @xas: the dax mapping tree of a file
 * @entry: an unlocked dax entry to be inserted
 * @pmd: distinguish whether it is a pmd fault
 */
static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
		const struct iomap_iter *iter, pfn_t *pfnp,
		struct xa_state *xas, void **entry, bool pmd)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = &iter->srcmap;
	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
	bool write = iter->flags & IOMAP_WRITE;
	unsigned long entry_flags = pmd ? DAX_PMD : 0;
	int err = 0;
	pfn_t pfn;
	void *kaddr;

	if (!pmd && vmf->cow_page)
		return dax_fault_cow_page(vmf, iter);

	/* if we are reading UNWRITTEN and HOLE, return a hole. */
	if (!write &&
	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
		if (!pmd)
			return dax_load_hole(xas, vmf, iter, entry);
		return dax_pmd_load_hole(xas, vmf, iter, entry);
	}

	if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
		WARN_ON_ONCE(1);
		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
	}

	err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
	if (err)
		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);

	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);

	if (write &&
	    srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
		err = dax_iomap_cow_copy(pos, size, size, srcmap, kaddr);
		if (err)
			return dax_fault_return(err);
	}

	if (dax_fault_is_synchronous(iter, vmf->vma))
		return dax_fault_synchronous_pfnp(pfnp, pfn);

	/* insert PMD pfn */
	if (pmd)
		return vmf_insert_pfn_pmd(vmf, pfn, write);

	/* insert PTE pfn */
	if (write)
		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
		.len		= PAGE_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = 0;
	void *entry;
	int error;

	trace_dax_pte_fault(iter.inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (iter.pos >= i_size_read(iter.inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		iter.flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
			iter.processed = -EIO;	/* fs corruption? */
			continue;
		}

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
		if (ret != VM_FAULT_SIGBUS &&
		    (iter.iomap.flags & IOMAP_F_NEW)) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			ret |= VM_FAULT_MAJOR;
		}

		if (!(ret & VM_FAULT_ERROR))
			iter.processed = PAGE_SIZE;
	}

	if (iomap_errp)
		*iomap_errp = error;
	if (!ret && error)
		ret = dax_fault_return(error);

unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(iter.inode, vmf, ret);
	return ret;
}

#ifdef CONFIG_FS_DAX_PMD
static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
		pgoff_t max_pgoff)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		return true;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vmf->vma->vm_flags & VM_SHARED))
		return true;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vmf->vma->vm_start)
		return true;
	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return true;

	/* If the PMD would extend beyond the file size */
	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
		return true;

	return false;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.len		= PMD_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = VM_FAULT_FALLBACK;
	pgoff_t max_pgoff;
	void *entry;
	int error;

	if (vmf->flags & FAULT_FLAG_WRITE)
		iter.flags |= IOMAP_WRITE;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);

	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);

	if (xas.xa_index >= max_pgoff) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		ret = 0;
		goto unlock_entry;
	}

	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (iomap_length(&iter) < PMD_SIZE)
			continue; /* actually breaks out of the loop */

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
		if (ret != VM_FAULT_FALLBACK)
			iter.processed = PMD_SIZE;
	}

unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (ret == VM_FAULT_FALLBACK) {
		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
	return ret;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
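
/*
 * Illustrative sketch of how a filesystem's vm_ops typically wires into the
 * helpers above and below (the names here are placeholders; the flow mirrors
 * what xfs/ext4 do, it is not an API defined in this file):
 *
 *	static vm_fault_t example_huge_fault(struct vm_fault *vmf,
 *					     enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		// take fs locks serializing against truncate here
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				      &example_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			// MAP_SYNC write fault: persist metadata, then map
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		// drop fs locks
 *		return ret;
 *	}
 */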

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, order);
	/* Did we race with someone splitting entry or so? */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);

static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
		struct iomap_iter *it_dest, u64 len, bool *same)
{
	const struct iomap *smap = &it_src->iomap;
	const struct iomap *dmap = &it_dest->iomap;
	loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
	void *saddr, *daddr;
	int id, ret;

	len = min(len, min(smap->length, dmap->length));

	if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
		*same = true;
		return len;
	}

	if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
		*same = false;
		return 0;
	}

	id = dax_read_lock();
	ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
				      &saddr, NULL);
	if (ret < 0)
		goto out_unlock;

	ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
				      &daddr, NULL);
	if (ret < 0)
		goto out_unlock;

	*same = !memcmp(saddr, daddr, len);
	if (!*same)
		len = 0;
	dax_read_unlock(id);
	return len;

out_unlock:
	dax_read_unlock(id);
	return -EIO;
}

int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
		struct inode *dst, loff_t dstoff, loff_t len, bool *same,
		const struct iomap_ops *ops)
{
	struct iomap_iter src_iter = {
		.inode		= src,
		.pos		= srcoff,
		.len		= len,
		.flags		= IOMAP_DAX,
	};
	struct iomap_iter dst_iter = {
		.inode		= dst,
		.pos		= dstoff,
		.len		= len,
		.flags		= IOMAP_DAX,
	};
	int ret;

	while ((ret = iomap_iter(&src_iter, ops)) > 0) {
		while ((ret = iomap_iter(&dst_iter, ops)) > 0) {
			dst_iter.processed = dax_range_compare_iter(&src_iter,
						&dst_iter, len, same);
		}
		if (ret <= 0)
			src_iter.processed = ret;
	}
	return ret;
}

int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
			      struct file *file_out, loff_t pos_out,
			      loff_t *len, unsigned int remap_flags,
			      const struct iomap_ops *ops)
{
	return __generic_remap_file_range_prep(file_in, pos_in, file_out,
					       pos_out, len, remap_flags, ops);
}
EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);