1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/mm/filemap.c 4 * 5 * Copyright (C) 1994-1999 Linus Torvalds 6 */ 7 8 /* 9 * This file handles the generic file mmap semantics used by 10 * most "normal" filesystems (but you don't /have/ to use this: 11 * the NFS filesystem used to do this differently, for example) 12 */ 13 #include <linux/export.h> 14 #include <linux/compiler.h> 15 #include <linux/dax.h> 16 #include <linux/fs.h> 17 #include <linux/sched/signal.h> 18 #include <linux/uaccess.h> 19 #include <linux/capability.h> 20 #include <linux/kernel_stat.h> 21 #include <linux/gfp.h> 22 #include <linux/mm.h> 23 #include <linux/swap.h> 24 #include <linux/mman.h> 25 #include <linux/pagemap.h> 26 #include <linux/file.h> 27 #include <linux/uio.h> 28 #include <linux/error-injection.h> 29 #include <linux/hash.h> 30 #include <linux/writeback.h> 31 #include <linux/backing-dev.h> 32 #include <linux/pagevec.h> 33 #include <linux/security.h> 34 #include <linux/cpuset.h> 35 #include <linux/hugetlb.h> 36 #include <linux/memcontrol.h> 37 #include <linux/cleancache.h> 38 #include <linux/shmem_fs.h> 39 #include <linux/rmap.h> 40 #include <linux/delayacct.h> 41 #include <linux/psi.h> 42 #include <linux/ramfs.h> 43 #include <linux/page_idle.h> 44 #include <asm/pgalloc.h> 45 #include <asm/tlbflush.h> 46 #include "internal.h" 47 48 #define CREATE_TRACE_POINTS 49 #include <trace/events/filemap.h> 50 51 /* 52 * FIXME: remove all knowledge of the buffer layer from the core VM 53 */ 54 #include <linux/buffer_head.h> /* for try_to_free_buffers */ 55 56 #include <asm/mman.h> 57 58 /* 59 * Shared mappings implemented 30.11.1994. It's not fully working yet, 60 * though. 61 * 62 * Shared mappings now work. 15.8.1995 Bruno. 63 * 64 * finished 'unifying' the page and buffer cache and SMP-threaded the 65 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com> 66 * 67 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de> 68 */ 69 70 /* 71 * Lock ordering: 72 * 73 * ->i_mmap_rwsem (truncate_pagecache) 74 * ->private_lock (__free_pte->__set_page_dirty_buffers) 75 * ->swap_lock (exclusive_swap_page, others) 76 * ->i_pages lock 77 * 78 * ->i_rwsem 79 * ->invalidate_lock (acquired by fs in truncate path) 80 * ->i_mmap_rwsem (truncate->unmap_mapping_range) 81 * 82 * ->mmap_lock 83 * ->i_mmap_rwsem 84 * ->page_table_lock or pte_lock (various, mainly in memory.c) 85 * ->i_pages lock (arch-dependent flush_dcache_mmap_lock) 86 * 87 * ->mmap_lock 88 * ->invalidate_lock (filemap_fault) 89 * ->lock_page (filemap_fault, access_process_vm) 90 * 91 * ->i_rwsem (generic_perform_write) 92 * ->mmap_lock (fault_in_readable->do_page_fault) 93 * 94 * bdi->wb.list_lock 95 * sb_lock (fs/fs-writeback.c) 96 * ->i_pages lock (__sync_single_inode) 97 * 98 * ->i_mmap_rwsem 99 * ->anon_vma.lock (vma_adjust) 100 * 101 * ->anon_vma.lock 102 * ->page_table_lock or pte_lock (anon_vma_prepare and various) 103 * 104 * ->page_table_lock or pte_lock 105 * ->swap_lock (try_to_unmap_one) 106 * ->private_lock (try_to_unmap_one) 107 * ->i_pages lock (try_to_unmap_one) 108 * ->lruvec->lru_lock (follow_page->mark_page_accessed) 109 * ->lruvec->lru_lock (check_pte_range->isolate_lru_page) 110 * ->private_lock (page_remove_rmap->set_page_dirty) 111 * ->i_pages lock (page_remove_rmap->set_page_dirty) 112 * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) 113 * ->inode->i_lock (page_remove_rmap->set_page_dirty) 114 * ->memcg->move_lock (page_remove_rmap->lock_page_memcg) 115 * bdi.wb->list_lock (zap_pte_range->set_page_dirty) 116 * 
->inode->i_lock (zap_pte_range->set_page_dirty) 117 * ->private_lock (zap_pte_range->__set_page_dirty_buffers) 118 * 119 * ->i_mmap_rwsem 120 * ->tasklist_lock (memory_failure, collect_procs_ao) 121 */ 122 123 static void page_cache_delete(struct address_space *mapping, 124 struct page *page, void *shadow) 125 { 126 XA_STATE(xas, &mapping->i_pages, page->index); 127 unsigned int nr = 1; 128 129 mapping_set_update(&xas, mapping); 130 131 /* hugetlb pages are represented by a single entry in the xarray */ 132 if (!PageHuge(page)) { 133 xas_set_order(&xas, page->index, compound_order(page)); 134 nr = compound_nr(page); 135 } 136 137 VM_BUG_ON_PAGE(!PageLocked(page), page); 138 VM_BUG_ON_PAGE(PageTail(page), page); 139 VM_BUG_ON_PAGE(nr != 1 && shadow, page); 140 141 xas_store(&xas, shadow); 142 xas_init_marks(&xas); 143 144 page->mapping = NULL; 145 /* Leave page->index set: truncation lookup relies upon it */ 146 mapping->nrpages -= nr; 147 } 148 149 static void unaccount_page_cache_page(struct address_space *mapping, 150 struct page *page) 151 { 152 int nr; 153 154 /* 155 * if we're uptodate, flush out into the cleancache, otherwise 156 * invalidate any existing cleancache entries. We can't leave 157 * stale data around in the cleancache once our page is gone 158 */ 159 if (PageUptodate(page) && PageMappedToDisk(page)) 160 cleancache_put_page(page); 161 else 162 cleancache_invalidate_page(mapping, page); 163 164 VM_BUG_ON_PAGE(PageTail(page), page); 165 VM_BUG_ON_PAGE(page_mapped(page), page); 166 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { 167 int mapcount; 168 169 pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n", 170 current->comm, page_to_pfn(page)); 171 dump_page(page, "still mapped when deleted"); 172 dump_stack(); 173 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 174 175 mapcount = page_mapcount(page); 176 if (mapping_exiting(mapping) && 177 page_count(page) >= mapcount + 2) { 178 /* 179 * All vmas have already been torn down, so it's 180 * a good bet that actually the page is unmapped, 181 * and we'd prefer not to leak it: if we're wrong, 182 * some other bad page check should catch it later. 183 */ 184 page_mapcount_reset(page); 185 page_ref_sub(page, mapcount); 186 } 187 } 188 189 /* hugetlb pages do not participate in page cache accounting. */ 190 if (PageHuge(page)) 191 return; 192 193 nr = thp_nr_pages(page); 194 195 __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr); 196 if (PageSwapBacked(page)) { 197 __mod_lruvec_page_state(page, NR_SHMEM, -nr); 198 if (PageTransHuge(page)) 199 __mod_lruvec_page_state(page, NR_SHMEM_THPS, -nr); 200 } else if (PageTransHuge(page)) { 201 __mod_lruvec_page_state(page, NR_FILE_THPS, -nr); 202 filemap_nr_thps_dec(mapping); 203 } 204 205 /* 206 * At this point page must be either written or cleaned by 207 * truncate. Dirty page here signals a bug and loss of 208 * unwritten data. 209 * 210 * This fixes dirty accounting after removing the page entirely 211 * but leaves PageDirty set: it has no effect for truncated 212 * page and anyway will be cleared before returning page into 213 * buddy allocator. 214 */ 215 if (WARN_ON_ONCE(PageDirty(page))) 216 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); 217 } 218 219 /* 220 * Delete a page from the page cache and free it. Caller has to make 221 * sure the page is locked and that nobody else uses it - or that usage 222 * is safe. The caller must hold the i_pages lock. 
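 *
 * A minimal sketch of the expected calling pattern (hypothetical caller;
 * delete_from_page_cache() below is the canonical user and additionally
 * takes the host inode's i_lock to maintain the inode LRU):
 *
 *	lock_page(page);
 *	xa_lock_irq(&mapping->i_pages);
 *	__delete_from_page_cache(page, NULL);
 *	xa_unlock_irq(&mapping->i_pages);
 *	page_cache_free_page(mapping, page);	(static helper below)
 *	unlock_page(page);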
223 */ 224 void __delete_from_page_cache(struct page *page, void *shadow) 225 { 226 struct address_space *mapping = page->mapping; 227 228 trace_mm_filemap_delete_from_page_cache(page); 229 230 unaccount_page_cache_page(mapping, page); 231 page_cache_delete(mapping, page, shadow); 232 } 233 234 static void page_cache_free_page(struct address_space *mapping, 235 struct page *page) 236 { 237 void (*freepage)(struct page *); 238 239 freepage = mapping->a_ops->freepage; 240 if (freepage) 241 freepage(page); 242 243 if (PageTransHuge(page) && !PageHuge(page)) { 244 page_ref_sub(page, thp_nr_pages(page)); 245 VM_BUG_ON_PAGE(page_count(page) <= 0, page); 246 } else { 247 put_page(page); 248 } 249 } 250 251 /** 252 * delete_from_page_cache - delete page from page cache 253 * @page: the page which the kernel is trying to remove from page cache 254 * 255 * This must be called only on pages that have been verified to be in the page 256 * cache and locked. It will never put the page into the free list, the caller 257 * has a reference on the page. 258 */ 259 void delete_from_page_cache(struct page *page) 260 { 261 struct address_space *mapping = page_mapping(page); 262 263 BUG_ON(!PageLocked(page)); 264 spin_lock(&mapping->host->i_lock); 265 xa_lock_irq(&mapping->i_pages); 266 __delete_from_page_cache(page, NULL); 267 xa_unlock_irq(&mapping->i_pages); 268 if (mapping_shrinkable(mapping)) 269 inode_add_lru(mapping->host); 270 spin_unlock(&mapping->host->i_lock); 271 272 page_cache_free_page(mapping, page); 273 } 274 EXPORT_SYMBOL(delete_from_page_cache); 275 276 /* 277 * page_cache_delete_batch - delete several pages from page cache 278 * @mapping: the mapping to which pages belong 279 * @pvec: pagevec with pages to delete 280 * 281 * The function walks over mapping->i_pages and removes pages passed in @pvec 282 * from the mapping. The function expects @pvec to be sorted by page index 283 * and is optimised for it to be dense. 284 * It tolerates holes in @pvec (mapping entries at those indices are not 285 * modified). The function expects only THP head pages to be present in the 286 * @pvec. 287 * 288 * The function expects the i_pages lock to be held. 289 */ 290 static void page_cache_delete_batch(struct address_space *mapping, 291 struct pagevec *pvec) 292 { 293 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); 294 int total_pages = 0; 295 int i = 0; 296 struct page *page; 297 298 mapping_set_update(&xas, mapping); 299 xas_for_each(&xas, page, ULONG_MAX) { 300 if (i >= pagevec_count(pvec)) 301 break; 302 303 /* A swap/dax/shadow entry got inserted? Skip it. */ 304 if (xa_is_value(page)) 305 continue; 306 /* 307 * A page got inserted in our range? Skip it. We have our 308 * pages locked so they are protected from being removed. 309 * If we see a page whose index is higher than ours, it 310 * means our page has been removed, which shouldn't be 311 * possible because we're holding the PageLock. 312 */ 313 if (page != pvec->pages[i]) { 314 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, 315 page); 316 continue; 317 } 318 319 WARN_ON_ONCE(!PageLocked(page)); 320 321 if (page->index == xas.xa_index) 322 page->mapping = NULL; 323 /* Leave page->index set: truncation lookup relies on it */ 324 325 /* 326 * Move to the next page in the vector if this is a regular 327 * page or the index is of the last sub-page of this compound 328 * page. 
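 *
 * For example (hypothetical numbers): a compound page of order 2 at
 * index 8 has compound_nr() == 4 and covers indices 8-11, so the
 * check below only advances @i once xas.xa_index reaches 11, the
 * index of its last sub-page.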
329 */ 330 if (page->index + compound_nr(page) - 1 == xas.xa_index) 331 i++; 332 xas_store(&xas, NULL); 333 total_pages++; 334 } 335 mapping->nrpages -= total_pages; 336 } 337 338 void delete_from_page_cache_batch(struct address_space *mapping, 339 struct pagevec *pvec) 340 { 341 int i; 342 343 if (!pagevec_count(pvec)) 344 return; 345 346 spin_lock(&mapping->host->i_lock); 347 xa_lock_irq(&mapping->i_pages); 348 for (i = 0; i < pagevec_count(pvec); i++) { 349 trace_mm_filemap_delete_from_page_cache(pvec->pages[i]); 350 351 unaccount_page_cache_page(mapping, pvec->pages[i]); 352 } 353 page_cache_delete_batch(mapping, pvec); 354 xa_unlock_irq(&mapping->i_pages); 355 if (mapping_shrinkable(mapping)) 356 inode_add_lru(mapping->host); 357 spin_unlock(&mapping->host->i_lock); 358 359 for (i = 0; i < pagevec_count(pvec); i++) 360 page_cache_free_page(mapping, pvec->pages[i]); 361 } 362 363 int filemap_check_errors(struct address_space *mapping) 364 { 365 int ret = 0; 366 /* Check for outstanding write errors */ 367 if (test_bit(AS_ENOSPC, &mapping->flags) && 368 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) 369 ret = -ENOSPC; 370 if (test_bit(AS_EIO, &mapping->flags) && 371 test_and_clear_bit(AS_EIO, &mapping->flags)) 372 ret = -EIO; 373 return ret; 374 } 375 EXPORT_SYMBOL(filemap_check_errors); 376 377 static int filemap_check_and_keep_errors(struct address_space *mapping) 378 { 379 /* Check for outstanding write errors */ 380 if (test_bit(AS_EIO, &mapping->flags)) 381 return -EIO; 382 if (test_bit(AS_ENOSPC, &mapping->flags)) 383 return -ENOSPC; 384 return 0; 385 } 386 387 /** 388 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range 389 * @mapping: address space structure to write 390 * @wbc: the writeback_control controlling the writeout 391 * 392 * Call writepages on the mapping using the provided wbc to control the 393 * writeout. 394 * 395 * Return: %0 on success, negative error code otherwise. 396 */ 397 int filemap_fdatawrite_wbc(struct address_space *mapping, 398 struct writeback_control *wbc) 399 { 400 int ret; 401 402 if (!mapping_can_writeback(mapping) || 403 !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 404 return 0; 405 406 wbc_attach_fdatawrite_inode(wbc, mapping->host); 407 ret = do_writepages(mapping, wbc); 408 wbc_detach_inode(wbc); 409 return ret; 410 } 411 EXPORT_SYMBOL(filemap_fdatawrite_wbc); 412 413 /** 414 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range 415 * @mapping: address space structure to write 416 * @start: offset in bytes where the range starts 417 * @end: offset in bytes where the range ends (inclusive) 418 * @sync_mode: enable synchronous operation 419 * 420 * Start writeback against all of a mapping's dirty pages that lie 421 * within the byte offsets <start, end> inclusive. 422 * 423 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as 424 * opposed to a regular memory cleansing writeback. The difference between 425 * these two operations is that if a dirty page/buffer is encountered, it must 426 * be waited upon, and not just skipped over. 427 * 428 * Return: %0 on success, negative error code otherwise. 
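 *
 * A hypothetical caller kicking off data-integrity writeback for one page
 * worth of data at byte offset pos could do (sketch only):
 *
 *	err = __filemap_fdatawrite_range(mapping, pos, pos + PAGE_SIZE - 1,
 *					 WB_SYNC_ALL);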
429 */ 430 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, 431 loff_t end, int sync_mode) 432 { 433 struct writeback_control wbc = { 434 .sync_mode = sync_mode, 435 .nr_to_write = LONG_MAX, 436 .range_start = start, 437 .range_end = end, 438 }; 439 440 return filemap_fdatawrite_wbc(mapping, &wbc); 441 } 442 443 static inline int __filemap_fdatawrite(struct address_space *mapping, 444 int sync_mode) 445 { 446 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); 447 } 448 449 int filemap_fdatawrite(struct address_space *mapping) 450 { 451 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); 452 } 453 EXPORT_SYMBOL(filemap_fdatawrite); 454 455 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, 456 loff_t end) 457 { 458 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); 459 } 460 EXPORT_SYMBOL(filemap_fdatawrite_range); 461 462 /** 463 * filemap_flush - mostly a non-blocking flush 464 * @mapping: target address_space 465 * 466 * This is a mostly non-blocking flush. Not suitable for data-integrity 467 * purposes - I/O may not be started against all dirty pages. 468 * 469 * Return: %0 on success, negative error code otherwise. 470 */ 471 int filemap_flush(struct address_space *mapping) 472 { 473 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); 474 } 475 EXPORT_SYMBOL(filemap_flush); 476 477 /** 478 * filemap_range_has_page - check if a page exists in range. 479 * @mapping: address space within which to check 480 * @start_byte: offset in bytes where the range starts 481 * @end_byte: offset in bytes where the range ends (inclusive) 482 * 483 * Find at least one page in the range supplied, usually used to check if 484 * direct writing in this range will trigger a writeback. 485 * 486 * Return: %true if at least one page exists in the specified range, 487 * %false otherwise. 488 */ 489 bool filemap_range_has_page(struct address_space *mapping, 490 loff_t start_byte, loff_t end_byte) 491 { 492 struct page *page; 493 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); 494 pgoff_t max = end_byte >> PAGE_SHIFT; 495 496 if (end_byte < start_byte) 497 return false; 498 499 rcu_read_lock(); 500 for (;;) { 501 page = xas_find(&xas, max); 502 if (xas_retry(&xas, page)) 503 continue; 504 /* Shadow entries don't count */ 505 if (xa_is_value(page)) 506 continue; 507 /* 508 * We don't need to try to pin this page; we're about to 509 * release the RCU lock anyway. It is enough to know that 510 * there was a page here recently. 
511 */ 512 break; 513 } 514 rcu_read_unlock(); 515 516 return page != NULL; 517 } 518 EXPORT_SYMBOL(filemap_range_has_page); 519 520 static void __filemap_fdatawait_range(struct address_space *mapping, 521 loff_t start_byte, loff_t end_byte) 522 { 523 pgoff_t index = start_byte >> PAGE_SHIFT; 524 pgoff_t end = end_byte >> PAGE_SHIFT; 525 struct pagevec pvec; 526 int nr_pages; 527 528 if (end_byte < start_byte) 529 return; 530 531 pagevec_init(&pvec); 532 while (index <= end) { 533 unsigned i; 534 535 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, 536 end, PAGECACHE_TAG_WRITEBACK); 537 if (!nr_pages) 538 break; 539 540 for (i = 0; i < nr_pages; i++) { 541 struct page *page = pvec.pages[i]; 542 543 wait_on_page_writeback(page); 544 ClearPageError(page); 545 } 546 pagevec_release(&pvec); 547 cond_resched(); 548 } 549 } 550 551 /** 552 * filemap_fdatawait_range - wait for writeback to complete 553 * @mapping: address space structure to wait for 554 * @start_byte: offset in bytes where the range starts 555 * @end_byte: offset in bytes where the range ends (inclusive) 556 * 557 * Walk the list of under-writeback pages of the given address space 558 * in the given range and wait for all of them. Check error status of 559 * the address space and return it. 560 * 561 * Since the error status of the address space is cleared by this function, 562 * callers are responsible for checking the return value and handling and/or 563 * reporting the error. 564 * 565 * Return: error status of the address space. 566 */ 567 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, 568 loff_t end_byte) 569 { 570 __filemap_fdatawait_range(mapping, start_byte, end_byte); 571 return filemap_check_errors(mapping); 572 } 573 EXPORT_SYMBOL(filemap_fdatawait_range); 574 575 /** 576 * filemap_fdatawait_range_keep_errors - wait for writeback to complete 577 * @mapping: address space structure to wait for 578 * @start_byte: offset in bytes where the range starts 579 * @end_byte: offset in bytes where the range ends (inclusive) 580 * 581 * Walk the list of under-writeback pages of the given address space in the 582 * given range and wait for all of them. Unlike filemap_fdatawait_range(), 583 * this function does not clear error status of the address space. 584 * 585 * Use this function if callers don't handle errors themselves. Expected 586 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), 587 * fsfreeze(8) 588 */ 589 int filemap_fdatawait_range_keep_errors(struct address_space *mapping, 590 loff_t start_byte, loff_t end_byte) 591 { 592 __filemap_fdatawait_range(mapping, start_byte, end_byte); 593 return filemap_check_and_keep_errors(mapping); 594 } 595 EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors); 596 597 /** 598 * file_fdatawait_range - wait for writeback to complete 599 * @file: file pointing to address space structure to wait for 600 * @start_byte: offset in bytes where the range starts 601 * @end_byte: offset in bytes where the range ends (inclusive) 602 * 603 * Walk the list of under-writeback pages of the address space that file 604 * refers to, in the given range and wait for all of them. Check error 605 * status of the address space vs. the file->f_wb_err cursor and return it. 606 * 607 * Since the error status of the file is advanced by this function, 608 * callers are responsible for checking the return value and handling and/or 609 * reporting the error. 610 * 611 * Return: error status of the address space vs. the file->f_wb_err cursor. 
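 *
 * Sketch of a hypothetical fsync-style sequence built on this helper:
 *
 *	err = filemap_fdatawrite_range(file->f_mapping, start, end);
 *	if (!err)
 *		err = file_fdatawait_range(file, start, end);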
612 */ 613 int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte) 614 { 615 struct address_space *mapping = file->f_mapping; 616 617 __filemap_fdatawait_range(mapping, start_byte, end_byte); 618 return file_check_and_advance_wb_err(file); 619 } 620 EXPORT_SYMBOL(file_fdatawait_range); 621 622 /** 623 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors 624 * @mapping: address space structure to wait for 625 * 626 * Walk the list of under-writeback pages of the given address space 627 * and wait for all of them. Unlike filemap_fdatawait(), this function 628 * does not clear error status of the address space. 629 * 630 * Use this function if callers don't handle errors themselves. Expected 631 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), 632 * fsfreeze(8) 633 * 634 * Return: error status of the address space. 635 */ 636 int filemap_fdatawait_keep_errors(struct address_space *mapping) 637 { 638 __filemap_fdatawait_range(mapping, 0, LLONG_MAX); 639 return filemap_check_and_keep_errors(mapping); 640 } 641 EXPORT_SYMBOL(filemap_fdatawait_keep_errors); 642 643 /* Returns true if writeback might be needed or already in progress. */ 644 static bool mapping_needs_writeback(struct address_space *mapping) 645 { 646 return mapping->nrpages; 647 } 648 649 static bool filemap_range_has_writeback(struct address_space *mapping, 650 loff_t start_byte, loff_t end_byte) 651 { 652 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); 653 pgoff_t max = end_byte >> PAGE_SHIFT; 654 struct page *page; 655 656 if (end_byte < start_byte) 657 return false; 658 659 rcu_read_lock(); 660 xas_for_each(&xas, page, max) { 661 if (xas_retry(&xas, page)) 662 continue; 663 if (xa_is_value(page)) 664 continue; 665 if (PageDirty(page) || PageLocked(page) || PageWriteback(page)) 666 break; 667 } 668 rcu_read_unlock(); 669 return page != NULL; 670 671 } 672 673 /** 674 * filemap_range_needs_writeback - check if range potentially needs writeback 675 * @mapping: address space within which to check 676 * @start_byte: offset in bytes where the range starts 677 * @end_byte: offset in bytes where the range ends (inclusive) 678 * 679 * Find at least one page in the range supplied, usually used to check if 680 * direct writing in this range will trigger a writeback. Used by O_DIRECT 681 * read/write with IOCB_NOWAIT, to see if the caller needs to do 682 * filemap_write_and_wait_range() before proceeding. 683 * 684 * Return: %true if the caller should do filemap_write_and_wait_range() before 685 * doing O_DIRECT to a page in this range, %false otherwise. 686 */ 687 bool filemap_range_needs_writeback(struct address_space *mapping, 688 loff_t start_byte, loff_t end_byte) 689 { 690 if (!mapping_needs_writeback(mapping)) 691 return false; 692 if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 693 !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) 694 return false; 695 return filemap_range_has_writeback(mapping, start_byte, end_byte); 696 } 697 EXPORT_SYMBOL_GPL(filemap_range_needs_writeback); 698 699 /** 700 * filemap_write_and_wait_range - write out & wait on a file range 701 * @mapping: the address_space for the pages 702 * @lstart: offset in bytes where the range starts 703 * @lend: offset in bytes where the range ends (inclusive) 704 * 705 * Write out and wait upon file offsets lstart->lend, inclusive. 
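 *
 * For instance, a hypothetical caller flushing a whole file before issuing
 * direct I/O could use (sketch only):
 *
 *	err = filemap_write_and_wait_range(mapping, 0, LLONG_MAX);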
706 * 707 * Note that @lend is inclusive (describes the last byte to be written) so 708 * that this function can be used to write to the very end-of-file (end = -1). 709 * 710 * Return: error status of the address space. 711 */ 712 int filemap_write_and_wait_range(struct address_space *mapping, 713 loff_t lstart, loff_t lend) 714 { 715 int err = 0; 716 717 if (mapping_needs_writeback(mapping)) { 718 err = __filemap_fdatawrite_range(mapping, lstart, lend, 719 WB_SYNC_ALL); 720 /* 721 * Even if the above returned error, the pages may be 722 * written partially (e.g. -ENOSPC), so we wait for it. 723 * But the -EIO is special case, it may indicate the worst 724 * thing (e.g. bug) happened, so we avoid waiting for it. 725 */ 726 if (err != -EIO) { 727 int err2 = filemap_fdatawait_range(mapping, 728 lstart, lend); 729 if (!err) 730 err = err2; 731 } else { 732 /* Clear any previously stored errors */ 733 filemap_check_errors(mapping); 734 } 735 } else { 736 err = filemap_check_errors(mapping); 737 } 738 return err; 739 } 740 EXPORT_SYMBOL(filemap_write_and_wait_range); 741 742 void __filemap_set_wb_err(struct address_space *mapping, int err) 743 { 744 errseq_t eseq = errseq_set(&mapping->wb_err, err); 745 746 trace_filemap_set_wb_err(mapping, eseq); 747 } 748 EXPORT_SYMBOL(__filemap_set_wb_err); 749 750 /** 751 * file_check_and_advance_wb_err - report wb error (if any) that was previously 752 * and advance wb_err to current one 753 * @file: struct file on which the error is being reported 754 * 755 * When userland calls fsync (or something like nfsd does the equivalent), we 756 * want to report any writeback errors that occurred since the last fsync (or 757 * since the file was opened if there haven't been any). 758 * 759 * Grab the wb_err from the mapping. If it matches what we have in the file, 760 * then just quickly return 0. The file is all caught up. 761 * 762 * If it doesn't match, then take the mapping value, set the "seen" flag in 763 * it and try to swap it into place. If it works, or another task beat us 764 * to it with the new value, then update the f_wb_err and return the error 765 * portion. The error at this point must be reported via proper channels 766 * (a'la fsync, or NFS COMMIT operation, etc.). 767 * 768 * While we handle mapping->wb_err with atomic operations, the f_wb_err 769 * value is protected by the f_lock since we must ensure that it reflects 770 * the latest value swapped in for this file descriptor. 771 * 772 * Return: %0 on success, negative error code otherwise. 773 */ 774 int file_check_and_advance_wb_err(struct file *file) 775 { 776 int err = 0; 777 errseq_t old = READ_ONCE(file->f_wb_err); 778 struct address_space *mapping = file->f_mapping; 779 780 /* Locklessly handle the common case where nothing has changed */ 781 if (errseq_check(&mapping->wb_err, old)) { 782 /* Something changed, must use slow path */ 783 spin_lock(&file->f_lock); 784 old = file->f_wb_err; 785 err = errseq_check_and_advance(&mapping->wb_err, 786 &file->f_wb_err); 787 trace_file_check_and_advance_wb_err(file, old); 788 spin_unlock(&file->f_lock); 789 } 790 791 /* 792 * We're mostly using this function as a drop in replacement for 793 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect 794 * that the legacy code would have had on these flags. 
795 */ 796 clear_bit(AS_EIO, &mapping->flags); 797 clear_bit(AS_ENOSPC, &mapping->flags); 798 return err; 799 } 800 EXPORT_SYMBOL(file_check_and_advance_wb_err); 801 802 /** 803 * file_write_and_wait_range - write out & wait on a file range 804 * @file: file pointing to address_space with pages 805 * @lstart: offset in bytes where the range starts 806 * @lend: offset in bytes where the range ends (inclusive) 807 * 808 * Write out and wait upon file offsets lstart->lend, inclusive. 809 * 810 * Note that @lend is inclusive (describes the last byte to be written) so 811 * that this function can be used to write to the very end-of-file (end = -1). 812 * 813 * After writing out and waiting on the data, we check and advance the 814 * f_wb_err cursor to the latest value, and return any errors detected there. 815 * 816 * Return: %0 on success, negative error code otherwise. 817 */ 818 int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend) 819 { 820 int err = 0, err2; 821 struct address_space *mapping = file->f_mapping; 822 823 if (mapping_needs_writeback(mapping)) { 824 err = __filemap_fdatawrite_range(mapping, lstart, lend, 825 WB_SYNC_ALL); 826 /* See comment of filemap_write_and_wait() */ 827 if (err != -EIO) 828 __filemap_fdatawait_range(mapping, lstart, lend); 829 } 830 err2 = file_check_and_advance_wb_err(file); 831 if (!err) 832 err = err2; 833 return err; 834 } 835 EXPORT_SYMBOL(file_write_and_wait_range); 836 837 /** 838 * replace_page_cache_page - replace a pagecache page with a new one 839 * @old: page to be replaced 840 * @new: page to replace with 841 * 842 * This function replaces a page in the pagecache with a new one. On 843 * success it acquires the pagecache reference for the new page and 844 * drops it for the old page. Both the old and new pages must be 845 * locked. This function does not add the new page to the LRU, the 846 * caller must do that. 847 * 848 * The remove + add is atomic. This function cannot fail. 849 */ 850 void replace_page_cache_page(struct page *old, struct page *new) 851 { 852 struct folio *fold = page_folio(old); 853 struct folio *fnew = page_folio(new); 854 struct address_space *mapping = old->mapping; 855 void (*freepage)(struct page *) = mapping->a_ops->freepage; 856 pgoff_t offset = old->index; 857 XA_STATE(xas, &mapping->i_pages, offset); 858 859 VM_BUG_ON_PAGE(!PageLocked(old), old); 860 VM_BUG_ON_PAGE(!PageLocked(new), new); 861 VM_BUG_ON_PAGE(new->mapping, new); 862 863 get_page(new); 864 new->mapping = mapping; 865 new->index = offset; 866 867 mem_cgroup_migrate(fold, fnew); 868 869 xas_lock_irq(&xas); 870 xas_store(&xas, new); 871 872 old->mapping = NULL; 873 /* hugetlb pages do not participate in page cache accounting. 
*/ 874 if (!PageHuge(old)) 875 __dec_lruvec_page_state(old, NR_FILE_PAGES); 876 if (!PageHuge(new)) 877 __inc_lruvec_page_state(new, NR_FILE_PAGES); 878 if (PageSwapBacked(old)) 879 __dec_lruvec_page_state(old, NR_SHMEM); 880 if (PageSwapBacked(new)) 881 __inc_lruvec_page_state(new, NR_SHMEM); 882 xas_unlock_irq(&xas); 883 if (freepage) 884 freepage(old); 885 put_page(old); 886 } 887 EXPORT_SYMBOL_GPL(replace_page_cache_page); 888 889 noinline int __filemap_add_folio(struct address_space *mapping, 890 struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp) 891 { 892 XA_STATE(xas, &mapping->i_pages, index); 893 int huge = folio_test_hugetlb(folio); 894 int error; 895 bool charged = false; 896 897 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 898 VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio); 899 mapping_set_update(&xas, mapping); 900 901 folio_get(folio); 902 folio->mapping = mapping; 903 folio->index = index; 904 905 if (!huge) { 906 error = mem_cgroup_charge(folio, NULL, gfp); 907 VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); 908 if (error) 909 goto error; 910 charged = true; 911 } 912 913 gfp &= GFP_RECLAIM_MASK; 914 915 do { 916 unsigned int order = xa_get_order(xas.xa, xas.xa_index); 917 void *entry, *old = NULL; 918 919 if (order > folio_order(folio)) 920 xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index), 921 order, gfp); 922 xas_lock_irq(&xas); 923 xas_for_each_conflict(&xas, entry) { 924 old = entry; 925 if (!xa_is_value(entry)) { 926 xas_set_err(&xas, -EEXIST); 927 goto unlock; 928 } 929 } 930 931 if (old) { 932 if (shadowp) 933 *shadowp = old; 934 /* entry may have been split before we acquired lock */ 935 order = xa_get_order(xas.xa, xas.xa_index); 936 if (order > folio_order(folio)) { 937 xas_split(&xas, old, order); 938 xas_reset(&xas); 939 } 940 } 941 942 xas_store(&xas, folio); 943 if (xas_error(&xas)) 944 goto unlock; 945 946 mapping->nrpages++; 947 948 /* hugetlb pages do not participate in page cache accounting */ 949 if (!huge) 950 __lruvec_stat_add_folio(folio, NR_FILE_PAGES); 951 unlock: 952 xas_unlock_irq(&xas); 953 } while (xas_nomem(&xas, gfp)); 954 955 if (xas_error(&xas)) { 956 error = xas_error(&xas); 957 if (charged) 958 mem_cgroup_uncharge(folio); 959 goto error; 960 } 961 962 trace_mm_filemap_add_to_page_cache(&folio->page); 963 return 0; 964 error: 965 folio->mapping = NULL; 966 /* Leave page->index set: truncation relies upon it */ 967 folio_put(folio); 968 return error; 969 } 970 ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO); 971 972 /** 973 * add_to_page_cache_locked - add a locked page to the pagecache 974 * @page: page to add 975 * @mapping: the page's address_space 976 * @offset: page index 977 * @gfp_mask: page allocation mode 978 * 979 * This function is used to add a page to the pagecache. It must be locked. 980 * This function does not add the page to the LRU. The caller must do that. 981 * 982 * Return: %0 on success, negative error code otherwise. 
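 *
 * Hypothetical usage sketch (the page must be locked before insertion; new
 * callers should normally prefer filemap_add_folio() below):
 *
 *	__SetPageLocked(page);
 *	err = add_to_page_cache_locked(page, mapping, index, GFP_KERNEL);
 *	if (err)
 *		__ClearPageLocked(page);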
983 */ 984 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, 985 pgoff_t offset, gfp_t gfp_mask) 986 { 987 return __filemap_add_folio(mapping, page_folio(page), offset, 988 gfp_mask, NULL); 989 } 990 EXPORT_SYMBOL(add_to_page_cache_locked); 991 992 int filemap_add_folio(struct address_space *mapping, struct folio *folio, 993 pgoff_t index, gfp_t gfp) 994 { 995 void *shadow = NULL; 996 int ret; 997 998 __folio_set_locked(folio); 999 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow); 1000 if (unlikely(ret)) 1001 __folio_clear_locked(folio); 1002 else { 1003 /* 1004 * The folio might have been evicted from cache only 1005 * recently, in which case it should be activated like 1006 * any other repeatedly accessed folio. 1007 * The exception is folios getting rewritten; evicting other 1008 * data from the working set, only to cache data that will 1009 * get overwritten with something else, is a waste of memory. 1010 */ 1011 WARN_ON_ONCE(folio_test_active(folio)); 1012 if (!(gfp & __GFP_WRITE) && shadow) 1013 workingset_refault(folio, shadow); 1014 folio_add_lru(folio); 1015 } 1016 return ret; 1017 } 1018 EXPORT_SYMBOL_GPL(filemap_add_folio); 1019 1020 #ifdef CONFIG_NUMA 1021 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) 1022 { 1023 int n; 1024 struct folio *folio; 1025 1026 if (cpuset_do_page_mem_spread()) { 1027 unsigned int cpuset_mems_cookie; 1028 do { 1029 cpuset_mems_cookie = read_mems_allowed_begin(); 1030 n = cpuset_mem_spread_node(); 1031 folio = __folio_alloc_node(gfp, order, n); 1032 } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie)); 1033 1034 return folio; 1035 } 1036 return folio_alloc(gfp, order); 1037 } 1038 EXPORT_SYMBOL(filemap_alloc_folio); 1039 #endif 1040 1041 /* 1042 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings 1043 * 1044 * Lock exclusively invalidate_lock of any passed mapping that is not NULL. 1045 * 1046 * @mapping1: the first mapping to lock 1047 * @mapping2: the second mapping to lock 1048 */ 1049 void filemap_invalidate_lock_two(struct address_space *mapping1, 1050 struct address_space *mapping2) 1051 { 1052 if (mapping1 > mapping2) 1053 swap(mapping1, mapping2); 1054 if (mapping1) 1055 down_write(&mapping1->invalidate_lock); 1056 if (mapping2 && mapping1 != mapping2) 1057 down_write_nested(&mapping2->invalidate_lock, 1); 1058 } 1059 EXPORT_SYMBOL(filemap_invalidate_lock_two); 1060 1061 /* 1062 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings 1063 * 1064 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL. 1065 * 1066 * @mapping1: the first mapping to unlock 1067 * @mapping2: the second mapping to unlock 1068 */ 1069 void filemap_invalidate_unlock_two(struct address_space *mapping1, 1070 struct address_space *mapping2) 1071 { 1072 if (mapping1) 1073 up_write(&mapping1->invalidate_lock); 1074 if (mapping2 && mapping1 != mapping2) 1075 up_write(&mapping2->invalidate_lock); 1076 } 1077 EXPORT_SYMBOL(filemap_invalidate_unlock_two); 1078 1079 /* 1080 * In order to wait for pages to become available there must be 1081 * waitqueues associated with pages. By using a hash table of 1082 * waitqueues where the bucket discipline is to maintain all 1083 * waiters on the same queue and wake all when any of the pages 1084 * become available, and for the woken contexts to check to be 1085 * sure the appropriate page became available, this saves space 1086 * at a cost of "thundering herd" phenomena during rare hash 1087 * collisions. 
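 *
 * For example, with PAGE_WAIT_TABLE_BITS of 8 (below) there are only 256
 * shared wait queues in total; folio_waitqueue() picks one by hashing the
 * folio pointer, so unrelated folios may share a queue and their waiters
 * are all woken together.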
1088 */ 1089 #define PAGE_WAIT_TABLE_BITS 8 1090 #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS) 1091 static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned; 1092 1093 static wait_queue_head_t *folio_waitqueue(struct folio *folio) 1094 { 1095 return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)]; 1096 } 1097 1098 void __init pagecache_init(void) 1099 { 1100 int i; 1101 1102 for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++) 1103 init_waitqueue_head(&folio_wait_table[i]); 1104 1105 page_writeback_init(); 1106 } 1107 1108 /* 1109 * The page wait code treats the "wait->flags" somewhat unusually, because 1110 * we have multiple different kinds of waits, not just the usual "exclusive" 1111 * one. 1112 * 1113 * We have: 1114 * 1115 * (a) no special bits set: 1116 * 1117 * We're just waiting for the bit to be released, and when a waker 1118 * calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up, 1119 * and remove it from the wait queue. 1120 * 1121 * Simple and straightforward. 1122 * 1123 * (b) WQ_FLAG_EXCLUSIVE: 1124 * 1125 * The waiter is waiting to get the lock, and only one waiter should 1126 * be woken up to avoid any thundering herd behavior. We'll set the 1127 * WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue. 1128 * 1129 * This is the traditional exclusive wait. 1130 * 1131 * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM: 1132 * 1133 * The waiter is waiting to get the bit, and additionally wants the 1134 * lock to be transferred to it for fair lock behavior. If the lock 1135 * cannot be taken, we stop walking the wait queue without waking 1136 * the waiter. 1137 * 1138 * This is the "fair lock handoff" case, and in addition to setting 1139 * WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see 1140 * that it now has the lock. 1141 */ 1142 static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) 1143 { 1144 unsigned int flags; 1145 struct wait_page_key *key = arg; 1146 struct wait_page_queue *wait_page 1147 = container_of(wait, struct wait_page_queue, wait); 1148 1149 if (!wake_page_match(wait_page, key)) 1150 return 0; 1151 1152 /* 1153 * If it's a lock handoff wait, we get the bit for it, and 1154 * stop walking (and do not wake it up) if we can't. 1155 */ 1156 flags = wait->flags; 1157 if (flags & WQ_FLAG_EXCLUSIVE) { 1158 if (test_bit(key->bit_nr, &key->folio->flags)) 1159 return -1; 1160 if (flags & WQ_FLAG_CUSTOM) { 1161 if (test_and_set_bit(key->bit_nr, &key->folio->flags)) 1162 return -1; 1163 flags |= WQ_FLAG_DONE; 1164 } 1165 } 1166 1167 /* 1168 * We are holding the wait-queue lock, but the waiter that 1169 * is waiting for this will be checking the flags without 1170 * any locking. 1171 * 1172 * So update the flags atomically, and wake up the waiter 1173 * afterwards to avoid any races. This store-release pairs 1174 * with the load-acquire in folio_wait_bit_common(). 1175 */ 1176 smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN); 1177 wake_up_state(wait->private, mode); 1178 1179 /* 1180 * Ok, we have successfully done what we're waiting for, 1181 * and we can unconditionally remove the wait entry. 1182 * 1183 * Note that this pairs with the "finish_wait()" in the 1184 * waiter, and has to be the absolute last thing we do. 1185 * After this list_del_init(&wait->entry) the wait entry 1186 * might be de-allocated and the process might even have 1187 * exited. 
1188 */ 1189 list_del_init_careful(&wait->entry); 1190 return (flags & WQ_FLAG_EXCLUSIVE) != 0; 1191 } 1192 1193 static void folio_wake_bit(struct folio *folio, int bit_nr) 1194 { 1195 wait_queue_head_t *q = folio_waitqueue(folio); 1196 struct wait_page_key key; 1197 unsigned long flags; 1198 wait_queue_entry_t bookmark; 1199 1200 key.folio = folio; 1201 key.bit_nr = bit_nr; 1202 key.page_match = 0; 1203 1204 bookmark.flags = 0; 1205 bookmark.private = NULL; 1206 bookmark.func = NULL; 1207 INIT_LIST_HEAD(&bookmark.entry); 1208 1209 spin_lock_irqsave(&q->lock, flags); 1210 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); 1211 1212 while (bookmark.flags & WQ_FLAG_BOOKMARK) { 1213 /* 1214 * Take a breather from holding the lock, 1215 * allow pages that finish wake up asynchronously 1216 * to acquire the lock and remove themselves 1217 * from wait queue 1218 */ 1219 spin_unlock_irqrestore(&q->lock, flags); 1220 cpu_relax(); 1221 spin_lock_irqsave(&q->lock, flags); 1222 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); 1223 } 1224 1225 /* 1226 * It is possible for other pages to have collided on the waitqueue 1227 * hash, so in that case check for a page match. That prevents a long- 1228 * term waiter 1229 * 1230 * It is still possible to miss a case here, when we woke page waiters 1231 * and removed them from the waitqueue, but there are still other 1232 * page waiters. 1233 */ 1234 if (!waitqueue_active(q) || !key.page_match) { 1235 folio_clear_waiters(folio); 1236 /* 1237 * It's possible to miss clearing Waiters here, when we woke 1238 * our page waiters, but the hashed waitqueue has waiters for 1239 * other pages on it. 1240 * 1241 * That's okay, it's a rare case. The next waker will clear it. 1242 */ 1243 } 1244 spin_unlock_irqrestore(&q->lock, flags); 1245 } 1246 1247 static void folio_wake(struct folio *folio, int bit) 1248 { 1249 if (!folio_test_waiters(folio)) 1250 return; 1251 folio_wake_bit(folio, bit); 1252 } 1253 1254 /* 1255 * A choice of three behaviors for folio_wait_bit_common(): 1256 */ 1257 enum behavior { 1258 EXCLUSIVE, /* Hold ref to page and take the bit when woken, like 1259 * __folio_lock() waiting on then setting PG_locked. 1260 */ 1261 SHARED, /* Hold ref to page and check the bit when woken, like 1262 * wait_on_page_writeback() waiting on PG_writeback. 1263 */ 1264 DROP, /* Drop ref to page before wait, no check when woken, 1265 * like put_and_wait_on_page_locked() on PG_locked. 1266 */ 1267 }; 1268 1269 /* 1270 * Attempt to check (or get) the folio flag, and mark us done 1271 * if successful. 1272 */ 1273 static inline bool folio_trylock_flag(struct folio *folio, int bit_nr, 1274 struct wait_queue_entry *wait) 1275 { 1276 if (wait->flags & WQ_FLAG_EXCLUSIVE) { 1277 if (test_and_set_bit(bit_nr, &folio->flags)) 1278 return false; 1279 } else if (test_bit(bit_nr, &folio->flags)) 1280 return false; 1281 1282 wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE; 1283 return true; 1284 } 1285 1286 /* How many times do we accept lock stealing from under a waiter? 
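 * The default of 5 below can be raised or lowered at runtime through the
 * vm.page_lock_unfairness sysctl; once an EXCLUSIVE waiter has had the lock
 * stolen that many times, it sets WQ_FLAG_CUSTOM and waits for a fair
 * handoff instead (see folio_wait_bit_common() below).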
*/ 1287 int sysctl_page_lock_unfairness = 5; 1288 1289 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr, 1290 int state, enum behavior behavior) 1291 { 1292 wait_queue_head_t *q = folio_waitqueue(folio); 1293 int unfairness = sysctl_page_lock_unfairness; 1294 struct wait_page_queue wait_page; 1295 wait_queue_entry_t *wait = &wait_page.wait; 1296 bool thrashing = false; 1297 bool delayacct = false; 1298 unsigned long pflags; 1299 1300 if (bit_nr == PG_locked && 1301 !folio_test_uptodate(folio) && folio_test_workingset(folio)) { 1302 if (!folio_test_swapbacked(folio)) { 1303 delayacct_thrashing_start(); 1304 delayacct = true; 1305 } 1306 psi_memstall_enter(&pflags); 1307 thrashing = true; 1308 } 1309 1310 init_wait(wait); 1311 wait->func = wake_page_function; 1312 wait_page.folio = folio; 1313 wait_page.bit_nr = bit_nr; 1314 1315 repeat: 1316 wait->flags = 0; 1317 if (behavior == EXCLUSIVE) { 1318 wait->flags = WQ_FLAG_EXCLUSIVE; 1319 if (--unfairness < 0) 1320 wait->flags |= WQ_FLAG_CUSTOM; 1321 } 1322 1323 /* 1324 * Do one last check whether we can get the 1325 * page bit synchronously. 1326 * 1327 * Do the folio_set_waiters() marking before that 1328 * to let any waker we _just_ missed know they 1329 * need to wake us up (otherwise they'll never 1330 * even go to the slow case that looks at the 1331 * page queue), and add ourselves to the wait 1332 * queue if we need to sleep. 1333 * 1334 * This part needs to be done under the queue 1335 * lock to avoid races. 1336 */ 1337 spin_lock_irq(&q->lock); 1338 folio_set_waiters(folio); 1339 if (!folio_trylock_flag(folio, bit_nr, wait)) 1340 __add_wait_queue_entry_tail(q, wait); 1341 spin_unlock_irq(&q->lock); 1342 1343 /* 1344 * From now on, all the logic will be based on 1345 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to 1346 * see whether the page bit testing has already 1347 * been done by the wake function. 1348 * 1349 * We can drop our reference to the folio. 1350 */ 1351 if (behavior == DROP) 1352 folio_put(folio); 1353 1354 /* 1355 * Note that until the "finish_wait()", or until 1356 * we see the WQ_FLAG_WOKEN flag, we need to 1357 * be very careful with the 'wait->flags', because 1358 * we may race with a waker that sets them. 1359 */ 1360 for (;;) { 1361 unsigned int flags; 1362 1363 set_current_state(state); 1364 1365 /* Loop until we've been woken or interrupted */ 1366 flags = smp_load_acquire(&wait->flags); 1367 if (!(flags & WQ_FLAG_WOKEN)) { 1368 if (signal_pending_state(state, current)) 1369 break; 1370 1371 io_schedule(); 1372 continue; 1373 } 1374 1375 /* If we were non-exclusive, we're done */ 1376 if (behavior != EXCLUSIVE) 1377 break; 1378 1379 /* If the waker got the lock for us, we're done */ 1380 if (flags & WQ_FLAG_DONE) 1381 break; 1382 1383 /* 1384 * Otherwise, if we're getting the lock, we need to 1385 * try to get it ourselves. 1386 * 1387 * And if that fails, we'll have to retry this all. 1388 */ 1389 if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0)))) 1390 goto repeat; 1391 1392 wait->flags |= WQ_FLAG_DONE; 1393 break; 1394 } 1395 1396 /* 1397 * If a signal happened, this 'finish_wait()' may remove the last 1398 * waiter from the wait-queues, but the folio waiters bit will remain 1399 * set. That's ok. The next wakeup will take care of it, and trying 1400 * to do it here would be difficult and prone to races. 
1401 */ 1402 finish_wait(q, wait); 1403 1404 if (thrashing) { 1405 if (delayacct) 1406 delayacct_thrashing_end(); 1407 psi_memstall_leave(&pflags); 1408 } 1409 1410 /* 1411 * NOTE! The wait->flags weren't stable until we've done the 1412 * 'finish_wait()', and we could have exited the loop above due 1413 * to a signal, and had a wakeup event happen after the signal 1414 * test but before the 'finish_wait()'. 1415 * 1416 * So only after the finish_wait() can we reliably determine 1417 * if we got woken up or not, so we can now figure out the final 1418 * return value based on that state without races. 1419 * 1420 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive 1421 * waiter, but an exclusive one requires WQ_FLAG_DONE. 1422 */ 1423 if (behavior == EXCLUSIVE) 1424 return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR; 1425 1426 return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR; 1427 } 1428 1429 void folio_wait_bit(struct folio *folio, int bit_nr) 1430 { 1431 folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); 1432 } 1433 EXPORT_SYMBOL(folio_wait_bit); 1434 1435 int folio_wait_bit_killable(struct folio *folio, int bit_nr) 1436 { 1437 return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED); 1438 } 1439 EXPORT_SYMBOL(folio_wait_bit_killable); 1440 1441 /** 1442 * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked 1443 * @page: The page to wait for. 1444 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc). 1445 * 1446 * The caller should hold a reference on @page. They expect the page to 1447 * become unlocked relatively soon, but do not wish to hold up migration 1448 * (for example) by holding the reference while waiting for the page to 1449 * come unlocked. After this function returns, the caller should not 1450 * dereference @page. 1451 * 1452 * Return: 0 if the page was unlocked or -EINTR if interrupted by a signal. 1453 */ 1454 int put_and_wait_on_page_locked(struct page *page, int state) 1455 { 1456 return folio_wait_bit_common(page_folio(page), PG_locked, state, 1457 DROP); 1458 } 1459 1460 /** 1461 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue 1462 * @folio: Folio defining the wait queue of interest 1463 * @waiter: Waiter to add to the queue 1464 * 1465 * Add an arbitrary @waiter to the wait queue for the nominated @folio. 1466 */ 1467 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter) 1468 { 1469 wait_queue_head_t *q = folio_waitqueue(folio); 1470 unsigned long flags; 1471 1472 spin_lock_irqsave(&q->lock, flags); 1473 __add_wait_queue_entry_tail(q, waiter); 1474 folio_set_waiters(folio); 1475 spin_unlock_irqrestore(&q->lock, flags); 1476 } 1477 EXPORT_SYMBOL_GPL(folio_add_wait_queue); 1478 1479 #ifndef clear_bit_unlock_is_negative_byte 1480 1481 /* 1482 * PG_waiters is the high bit in the same byte as PG_lock. 1483 * 1484 * On x86 (and on many other architectures), we can clear PG_lock and 1485 * test the sign bit at the same time. But if the architecture does 1486 * not support that special operation, we just do this all by hand 1487 * instead. 1488 * 1489 * The read of PG_waiters has to be after (or concurrently with) PG_locked 1490 * being cleared, but a memory barrier should be unnecessary since it is 1491 * in the same byte as PG_locked. 
1492 */ 1493 static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem) 1494 { 1495 clear_bit_unlock(nr, mem); 1496 /* smp_mb__after_atomic(); */ 1497 return test_bit(PG_waiters, mem); 1498 } 1499 1500 #endif 1501 1502 /** 1503 * folio_unlock - Unlock a locked folio. 1504 * @folio: The folio. 1505 * 1506 * Unlocks the folio and wakes up any thread sleeping on the page lock. 1507 * 1508 * Context: May be called from interrupt or process context. May not be 1509 * called from NMI context. 1510 */ 1511 void folio_unlock(struct folio *folio) 1512 { 1513 /* Bit 7 allows x86 to check the byte's sign bit */ 1514 BUILD_BUG_ON(PG_waiters != 7); 1515 BUILD_BUG_ON(PG_locked > 7); 1516 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 1517 if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0))) 1518 folio_wake_bit(folio, PG_locked); 1519 } 1520 EXPORT_SYMBOL(folio_unlock); 1521 1522 /** 1523 * folio_end_private_2 - Clear PG_private_2 and wake any waiters. 1524 * @folio: The folio. 1525 * 1526 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for 1527 * it. The folio reference held for PG_private_2 being set is released. 1528 * 1529 * This is, for example, used when a netfs folio is being written to a local 1530 * disk cache, thereby allowing writes to the cache for the same folio to be 1531 * serialised. 1532 */ 1533 void folio_end_private_2(struct folio *folio) 1534 { 1535 VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio); 1536 clear_bit_unlock(PG_private_2, folio_flags(folio, 0)); 1537 folio_wake_bit(folio, PG_private_2); 1538 folio_put(folio); 1539 } 1540 EXPORT_SYMBOL(folio_end_private_2); 1541 1542 /** 1543 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio. 1544 * @folio: The folio to wait on. 1545 * 1546 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio. 1547 */ 1548 void folio_wait_private_2(struct folio *folio) 1549 { 1550 while (folio_test_private_2(folio)) 1551 folio_wait_bit(folio, PG_private_2); 1552 } 1553 EXPORT_SYMBOL(folio_wait_private_2); 1554 1555 /** 1556 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio. 1557 * @folio: The folio to wait on. 1558 * 1559 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a 1560 * fatal signal is received by the calling task. 1561 * 1562 * Return: 1563 * - 0 if successful. 1564 * - -EINTR if a fatal signal was encountered. 1565 */ 1566 int folio_wait_private_2_killable(struct folio *folio) 1567 { 1568 int ret = 0; 1569 1570 while (folio_test_private_2(folio)) { 1571 ret = folio_wait_bit_killable(folio, PG_private_2); 1572 if (ret < 0) 1573 break; 1574 } 1575 1576 return ret; 1577 } 1578 EXPORT_SYMBOL(folio_wait_private_2_killable); 1579 1580 /** 1581 * folio_end_writeback - End writeback against a folio. 1582 * @folio: The folio. 1583 */ 1584 void folio_end_writeback(struct folio *folio) 1585 { 1586 /* 1587 * folio_test_clear_reclaim() could be used here but it is an 1588 * atomic operation and overkill in this particular case. Failing 1589 * to shuffle a folio marked for immediate reclaim is too mild 1590 * a gain to justify taking an atomic operation penalty at the 1591 * end of every folio writeback. 1592 */ 1593 if (folio_test_reclaim(folio)) { 1594 folio_clear_reclaim(folio); 1595 folio_rotate_reclaimable(folio); 1596 } 1597 1598 /* 1599 * Writeback does not hold a folio reference of its own, relying 1600 * on truncation to wait for the clearing of PG_writeback. 
1601 * But here we must make sure that the folio is not freed and 1602 * reused before the folio_wake(). 1603 */ 1604 folio_get(folio); 1605 if (!__folio_end_writeback(folio)) 1606 BUG(); 1607 1608 smp_mb__after_atomic(); 1609 folio_wake(folio, PG_writeback); 1610 acct_reclaim_writeback(folio); 1611 folio_put(folio); 1612 } 1613 EXPORT_SYMBOL(folio_end_writeback); 1614 1615 /* 1616 * After completing I/O on a page, call this routine to update the page 1617 * flags appropriately 1618 */ 1619 void page_endio(struct page *page, bool is_write, int err) 1620 { 1621 if (!is_write) { 1622 if (!err) { 1623 SetPageUptodate(page); 1624 } else { 1625 ClearPageUptodate(page); 1626 SetPageError(page); 1627 } 1628 unlock_page(page); 1629 } else { 1630 if (err) { 1631 struct address_space *mapping; 1632 1633 SetPageError(page); 1634 mapping = page_mapping(page); 1635 if (mapping) 1636 mapping_set_error(mapping, err); 1637 } 1638 end_page_writeback(page); 1639 } 1640 } 1641 EXPORT_SYMBOL_GPL(page_endio); 1642 1643 /** 1644 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it. 1645 * @folio: The folio to lock 1646 */ 1647 void __folio_lock(struct folio *folio) 1648 { 1649 folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE, 1650 EXCLUSIVE); 1651 } 1652 EXPORT_SYMBOL(__folio_lock); 1653 1654 int __folio_lock_killable(struct folio *folio) 1655 { 1656 return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE, 1657 EXCLUSIVE); 1658 } 1659 EXPORT_SYMBOL_GPL(__folio_lock_killable); 1660 1661 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) 1662 { 1663 struct wait_queue_head *q = folio_waitqueue(folio); 1664 int ret = 0; 1665 1666 wait->folio = folio; 1667 wait->bit_nr = PG_locked; 1668 1669 spin_lock_irq(&q->lock); 1670 __add_wait_queue_entry_tail(q, &wait->wait); 1671 folio_set_waiters(folio); 1672 ret = !folio_trylock(folio); 1673 /* 1674 * If we were successful now, we know we're still on the 1675 * waitqueue as we're still under the lock. This means it's 1676 * safe to remove and return success, we know the callback 1677 * isn't going to trigger. 1678 */ 1679 if (!ret) 1680 __remove_wait_queue(q, &wait->wait); 1681 else 1682 ret = -EIOCBQUEUED; 1683 spin_unlock_irq(&q->lock); 1684 return ret; 1685 } 1686 1687 /* 1688 * Return values: 1689 * true - folio is locked; mmap_lock is still held. 1690 * false - folio is not locked. 1691 * mmap_lock has been released (mmap_read_unlock(), unless flags had both 1692 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in 1693 * which case mmap_lock is still held. 1694 * 1695 * If neither ALLOW_RETRY nor KILLABLE are set, will always return true 1696 * with the folio locked and the mmap_lock unperturbed. 1697 */ 1698 bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, 1699 unsigned int flags) 1700 { 1701 if (fault_flag_allow_retry_first(flags)) { 1702 /* 1703 * CAUTION! In this case, mmap_lock is not released 1704 * even though return 0. 
1705 */ 1706 if (flags & FAULT_FLAG_RETRY_NOWAIT) 1707 return false; 1708 1709 mmap_read_unlock(mm); 1710 if (flags & FAULT_FLAG_KILLABLE) 1711 folio_wait_locked_killable(folio); 1712 else 1713 folio_wait_locked(folio); 1714 return false; 1715 } 1716 if (flags & FAULT_FLAG_KILLABLE) { 1717 bool ret; 1718 1719 ret = __folio_lock_killable(folio); 1720 if (ret) { 1721 mmap_read_unlock(mm); 1722 return false; 1723 } 1724 } else { 1725 __folio_lock(folio); 1726 } 1727 1728 return true; 1729 } 1730 1731 /** 1732 * page_cache_next_miss() - Find the next gap in the page cache. 1733 * @mapping: Mapping. 1734 * @index: Index. 1735 * @max_scan: Maximum range to search. 1736 * 1737 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the 1738 * gap with the lowest index. 1739 * 1740 * This function may be called under the rcu_read_lock. However, this will 1741 * not atomically search a snapshot of the cache at a single point in time. 1742 * For example, if a gap is created at index 5, then subsequently a gap is 1743 * created at index 10, page_cache_next_miss covering both indices may 1744 * return 10 if called under the rcu_read_lock. 1745 * 1746 * Return: The index of the gap if found, otherwise an index outside the 1747 * range specified (in which case 'return - index >= max_scan' will be true). 1748 * In the rare case of index wrap-around, 0 will be returned. 1749 */ 1750 pgoff_t page_cache_next_miss(struct address_space *mapping, 1751 pgoff_t index, unsigned long max_scan) 1752 { 1753 XA_STATE(xas, &mapping->i_pages, index); 1754 1755 while (max_scan--) { 1756 void *entry = xas_next(&xas); 1757 if (!entry || xa_is_value(entry)) 1758 break; 1759 if (xas.xa_index == 0) 1760 break; 1761 } 1762 1763 return xas.xa_index; 1764 } 1765 EXPORT_SYMBOL(page_cache_next_miss); 1766 1767 /** 1768 * page_cache_prev_miss() - Find the previous gap in the page cache. 1769 * @mapping: Mapping. 1770 * @index: Index. 1771 * @max_scan: Maximum range to search. 1772 * 1773 * Search the range [max(index - max_scan + 1, 0), index] for the 1774 * gap with the highest index. 1775 * 1776 * This function may be called under the rcu_read_lock. However, this will 1777 * not atomically search a snapshot of the cache at a single point in time. 1778 * For example, if a gap is created at index 10, then subsequently a gap is 1779 * created at index 5, page_cache_prev_miss() covering both indices may 1780 * return 5 if called under the rcu_read_lock. 1781 * 1782 * Return: The index of the gap if found, otherwise an index outside the 1783 * range specified (in which case 'index - return >= max_scan' will be true). 1784 * In the rare case of wrap-around, ULONG_MAX will be returned. 1785 */ 1786 pgoff_t page_cache_prev_miss(struct address_space *mapping, 1787 pgoff_t index, unsigned long max_scan) 1788 { 1789 XA_STATE(xas, &mapping->i_pages, index); 1790 1791 while (max_scan--) { 1792 void *entry = xas_prev(&xas); 1793 if (!entry || xa_is_value(entry)) 1794 break; 1795 if (xas.xa_index == ULONG_MAX) 1796 break; 1797 } 1798 1799 return xas.xa_index; 1800 } 1801 EXPORT_SYMBOL(page_cache_prev_miss); 1802 1803 /* 1804 * Lockless page cache protocol: 1805 * On the lookup side: 1806 * 1. Load the folio from i_pages 1807 * 2. Increment the refcount if it's not zero 1808 * 3. If the folio is not found by xas_reload(), put the refcount and retry 1809 * 1810 * On the removal side: 1811 * A. Freeze the page (by zeroing the refcount if nobody else has a reference) 1812 * B. Remove the page from i_pages 1813 * C. 
Return the page to the page allocator 1814 * 1815 * This means that any page may have its reference count temporarily 1816 * increased by a speculative page cache (or fast GUP) lookup as it can 1817 * be allocated by another user before the RCU grace period expires. 1818 * Because the refcount temporarily acquired here may end up being the 1819 * last refcount on the page, any page allocation must be freeable by 1820 * folio_put(). 1821 */ 1822 1823 /* 1824 * mapping_get_entry - Get a page cache entry. 1825 * @mapping: the address_space to search 1826 * @index: The page cache index. 1827 * 1828 * Looks up the page cache entry at @mapping & @index. If it is a folio, 1829 * it is returned with an increased refcount. If it is a shadow entry 1830 * of a previously evicted folio, or a swap entry from shmem/tmpfs, 1831 * it is returned without further action. 1832 * 1833 * Return: The folio, swap or shadow entry, %NULL if nothing is found. 1834 */ 1835 static void *mapping_get_entry(struct address_space *mapping, pgoff_t index) 1836 { 1837 XA_STATE(xas, &mapping->i_pages, index); 1838 struct folio *folio; 1839 1840 rcu_read_lock(); 1841 repeat: 1842 xas_reset(&xas); 1843 folio = xas_load(&xas); 1844 if (xas_retry(&xas, folio)) 1845 goto repeat; 1846 /* 1847 * A shadow entry of a recently evicted page, or a swap entry from 1848 * shmem/tmpfs. Return it without attempting to raise page count. 1849 */ 1850 if (!folio || xa_is_value(folio)) 1851 goto out; 1852 1853 if (!folio_try_get_rcu(folio)) 1854 goto repeat; 1855 1856 if (unlikely(folio != xas_reload(&xas))) { 1857 folio_put(folio); 1858 goto repeat; 1859 } 1860 out: 1861 rcu_read_unlock(); 1862 1863 return folio; 1864 } 1865 1866 /** 1867 * __filemap_get_folio - Find and get a reference to a folio. 1868 * @mapping: The address_space to search. 1869 * @index: The page index. 1870 * @fgp_flags: %FGP flags modify how the folio is returned. 1871 * @gfp: Memory allocation flags to use if %FGP_CREAT is specified. 1872 * 1873 * Looks up the page cache entry at @mapping & @index. 1874 * 1875 * @fgp_flags can be zero or more of these flags: 1876 * 1877 * * %FGP_ACCESSED - The folio will be marked accessed. 1878 * * %FGP_LOCK - The folio is returned locked. 1879 * * %FGP_ENTRY - If there is a shadow / swap / DAX entry, return it 1880 * instead of allocating a new folio to replace it. 1881 * * %FGP_CREAT - If no page is present then a new page is allocated using 1882 * @gfp and added to the page cache and the VM's LRU list. 1883 * The page is returned locked and with an increased refcount. 1884 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the 1885 * page is already in cache. If the page was allocated, unlock it before 1886 * returning so the caller can do the same dance. 1887 * * %FGP_WRITE - The page will be written to by the caller. 1888 * * %FGP_NOFS - __GFP_FS will get cleared in gfp. 1889 * * %FGP_NOWAIT - Don't get blocked by page lock. 1890 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback) 1891 * 1892 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even 1893 * if the %GFP flags specified for %FGP_CREAT are atomic. 1894 * 1895 * If there is a page cache page, it is returned with an increased refcount. 1896 * 1897 * Return: The found folio or %NULL otherwise. 
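 *
 * A minimal usage sketch (hypothetical caller, error handling trimmed;
 * not lifted from an in-tree user), asking for the folio to be created
 * on a miss and returned locked:
 *
 *	folio = __filemap_get_folio(mapping, index,
 *			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *			mapping_gfp_mask(mapping));
 *	if (!folio)
 *		return -ENOMEM;
 *	... work on the locked, referenced folio ...
 *	folio_unlock(folio);
 *	folio_put(folio);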
1898 */ 1899 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, 1900 int fgp_flags, gfp_t gfp) 1901 { 1902 struct folio *folio; 1903 1904 repeat: 1905 folio = mapping_get_entry(mapping, index); 1906 if (xa_is_value(folio)) { 1907 if (fgp_flags & FGP_ENTRY) 1908 return folio; 1909 folio = NULL; 1910 } 1911 if (!folio) 1912 goto no_page; 1913 1914 if (fgp_flags & FGP_LOCK) { 1915 if (fgp_flags & FGP_NOWAIT) { 1916 if (!folio_trylock(folio)) { 1917 folio_put(folio); 1918 return NULL; 1919 } 1920 } else { 1921 folio_lock(folio); 1922 } 1923 1924 /* Has the page been truncated? */ 1925 if (unlikely(folio->mapping != mapping)) { 1926 folio_unlock(folio); 1927 folio_put(folio); 1928 goto repeat; 1929 } 1930 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); 1931 } 1932 1933 if (fgp_flags & FGP_ACCESSED) 1934 folio_mark_accessed(folio); 1935 else if (fgp_flags & FGP_WRITE) { 1936 /* Clear idle flag for buffer write */ 1937 if (folio_test_idle(folio)) 1938 folio_clear_idle(folio); 1939 } 1940 1941 if (fgp_flags & FGP_STABLE) 1942 folio_wait_stable(folio); 1943 no_page: 1944 if (!folio && (fgp_flags & FGP_CREAT)) { 1945 int err; 1946 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) 1947 gfp |= __GFP_WRITE; 1948 if (fgp_flags & FGP_NOFS) 1949 gfp &= ~__GFP_FS; 1950 1951 folio = filemap_alloc_folio(gfp, 0); 1952 if (!folio) 1953 return NULL; 1954 1955 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP)))) 1956 fgp_flags |= FGP_LOCK; 1957 1958 /* Init accessed so avoid atomic mark_page_accessed later */ 1959 if (fgp_flags & FGP_ACCESSED) 1960 __folio_set_referenced(folio); 1961 1962 err = filemap_add_folio(mapping, folio, index, gfp); 1963 if (unlikely(err)) { 1964 folio_put(folio); 1965 folio = NULL; 1966 if (err == -EEXIST) 1967 goto repeat; 1968 } 1969 1970 /* 1971 * filemap_add_folio locks the page, and for mmap 1972 * we expect an unlocked page. 1973 */ 1974 if (folio && (fgp_flags & FGP_FOR_MMAP)) 1975 folio_unlock(folio); 1976 } 1977 1978 return folio; 1979 } 1980 EXPORT_SYMBOL(__filemap_get_folio); 1981 1982 static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max, 1983 xa_mark_t mark) 1984 { 1985 struct page *page; 1986 1987 retry: 1988 if (mark == XA_PRESENT) 1989 page = xas_find(xas, max); 1990 else 1991 page = xas_find_marked(xas, max, mark); 1992 1993 if (xas_retry(xas, page)) 1994 goto retry; 1995 /* 1996 * A shadow entry of a recently evicted page, a swap 1997 * entry from shmem/tmpfs or a DAX entry. Return it 1998 * without attempting to raise page count. 1999 */ 2000 if (!page || xa_is_value(page)) 2001 return page; 2002 2003 if (!page_cache_get_speculative(page)) 2004 goto reset; 2005 2006 /* Has the page moved or been split? */ 2007 if (unlikely(page != xas_reload(xas))) { 2008 put_page(page); 2009 goto reset; 2010 } 2011 2012 return page; 2013 reset: 2014 xas_reset(xas); 2015 goto retry; 2016 } 2017 2018 /** 2019 * find_get_entries - gang pagecache lookup 2020 * @mapping: The address_space to search 2021 * @start: The starting page cache index 2022 * @end: The final page index (inclusive). 2023 * @pvec: Where the resulting entries are placed. 2024 * @indices: The cache indices corresponding to the entries in @entries 2025 * 2026 * find_get_entries() will search for and return a batch of entries in 2027 * the mapping. The entries are placed in @pvec. find_get_entries() 2028 * takes a reference on any actual pages it returns. 
2029 * 2030 * The search returns a group of mapping-contiguous page cache entries 2031 * with ascending indexes. There may be holes in the indices due to 2032 * not-present pages. 2033 * 2034 * Any shadow entries of evicted pages, or swap entries from 2035 * shmem/tmpfs, are included in the returned array. 2036 * 2037 * If it finds a Transparent Huge Page, head or tail, find_get_entries() 2038 * stops at that page: the caller is likely to have a better way to handle 2039 * the compound page as a whole, and then skip its extent, than repeatedly 2040 * calling find_get_entries() to return all its tails. 2041 * 2042 * Return: the number of pages and shadow entries which were found. 2043 */ 2044 unsigned find_get_entries(struct address_space *mapping, pgoff_t start, 2045 pgoff_t end, struct pagevec *pvec, pgoff_t *indices) 2046 { 2047 XA_STATE(xas, &mapping->i_pages, start); 2048 struct page *page; 2049 unsigned int ret = 0; 2050 unsigned nr_entries = PAGEVEC_SIZE; 2051 2052 rcu_read_lock(); 2053 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { 2054 /* 2055 * Terminate early on finding a THP, to allow the caller to 2056 * handle it all at once; but continue if this is hugetlbfs. 2057 */ 2058 if (!xa_is_value(page) && PageTransHuge(page) && 2059 !PageHuge(page)) { 2060 page = find_subpage(page, xas.xa_index); 2061 nr_entries = ret + 1; 2062 } 2063 2064 indices[ret] = xas.xa_index; 2065 pvec->pages[ret] = page; 2066 if (++ret == nr_entries) 2067 break; 2068 } 2069 rcu_read_unlock(); 2070 2071 pvec->nr = ret; 2072 return ret; 2073 } 2074 2075 /** 2076 * find_lock_entries - Find a batch of pagecache entries. 2077 * @mapping: The address_space to search. 2078 * @start: The starting page cache index. 2079 * @end: The final page index (inclusive). 2080 * @pvec: Where the resulting entries are placed. 2081 * @indices: The cache indices of the entries in @pvec. 2082 * 2083 * find_lock_entries() will return a batch of entries from @mapping. 2084 * Swap, shadow and DAX entries are included. Pages are returned 2085 * locked and with an incremented refcount. Pages which are locked by 2086 * somebody else or under writeback are skipped. Only the head page of 2087 * a THP is returned. Pages which are partially outside the range are 2088 * not returned. 2089 * 2090 * The entries have ascending indexes. The indices may not be consecutive 2091 * due to not-present entries, THP pages, pages which could not be locked 2092 * or pages under writeback. 2093 * 2094 * Return: The number of entries which were found. 
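 *
 * A rough usage sketch (hypothetical caller, loosely modelled on the
 * truncate path; shadow/swap/DAX entries come back without a reference
 * or lock, so they are simply skipped here):
 *
 *	pagevec_init(&pvec);
 *	while (find_lock_entries(mapping, index, end, &pvec, indices)) {
 *		unsigned int i;
 *
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *
 *			index = indices[i] + 1;
 *			if (xa_is_value(page))
 *				continue;
 *			... work on the locked page, then ...
 *			unlock_page(page);
 *			put_page(page);
 *		}
 *		pagevec_reinit(&pvec);
 *	}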
2095 */ 2096 unsigned find_lock_entries(struct address_space *mapping, pgoff_t start, 2097 pgoff_t end, struct pagevec *pvec, pgoff_t *indices) 2098 { 2099 XA_STATE(xas, &mapping->i_pages, start); 2100 struct page *page; 2101 2102 rcu_read_lock(); 2103 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { 2104 if (!xa_is_value(page)) { 2105 if (page->index < start) 2106 goto put; 2107 if (page->index + thp_nr_pages(page) - 1 > end) 2108 goto put; 2109 if (!trylock_page(page)) 2110 goto put; 2111 if (page->mapping != mapping || PageWriteback(page)) 2112 goto unlock; 2113 VM_BUG_ON_PAGE(!thp_contains(page, xas.xa_index), 2114 page); 2115 } 2116 indices[pvec->nr] = xas.xa_index; 2117 if (!pagevec_add(pvec, page)) 2118 break; 2119 goto next; 2120 unlock: 2121 unlock_page(page); 2122 put: 2123 put_page(page); 2124 next: 2125 if (!xa_is_value(page) && PageTransHuge(page)) { 2126 unsigned int nr_pages = thp_nr_pages(page); 2127 2128 /* Final THP may cross MAX_LFS_FILESIZE on 32-bit */ 2129 xas_set(&xas, page->index + nr_pages); 2130 if (xas.xa_index < nr_pages) 2131 break; 2132 } 2133 } 2134 rcu_read_unlock(); 2135 2136 return pagevec_count(pvec); 2137 } 2138 2139 /** 2140 * find_get_pages_range - gang pagecache lookup 2141 * @mapping: The address_space to search 2142 * @start: The starting page index 2143 * @end: The final page index (inclusive) 2144 * @nr_pages: The maximum number of pages 2145 * @pages: Where the resulting pages are placed 2146 * 2147 * find_get_pages_range() will search for and return a group of up to @nr_pages 2148 * pages in the mapping starting at index @start and up to index @end 2149 * (inclusive). The pages are placed at @pages. find_get_pages_range() takes 2150 * a reference against the returned pages. 2151 * 2152 * The search returns a group of mapping-contiguous pages with ascending 2153 * indexes. There may be holes in the indices due to not-present pages. 2154 * We also update @start to index the next page for the traversal. 2155 * 2156 * Return: the number of pages which were found. If this number is 2157 * smaller than @nr_pages, the end of specified range has been 2158 * reached. 2159 */ 2160 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, 2161 pgoff_t end, unsigned int nr_pages, 2162 struct page **pages) 2163 { 2164 XA_STATE(xas, &mapping->i_pages, *start); 2165 struct page *page; 2166 unsigned ret = 0; 2167 2168 if (unlikely(!nr_pages)) 2169 return 0; 2170 2171 rcu_read_lock(); 2172 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { 2173 /* Skip over shadow, swap and DAX entries */ 2174 if (xa_is_value(page)) 2175 continue; 2176 2177 pages[ret] = find_subpage(page, xas.xa_index); 2178 if (++ret == nr_pages) { 2179 *start = xas.xa_index + 1; 2180 goto out; 2181 } 2182 } 2183 2184 /* 2185 * We come here when there is no page beyond @end. We take care to not 2186 * overflow the index @start as it confuses some of the callers. This 2187 * breaks the iteration when there is a page at index -1 but that is 2188 * already broken anyway. 
2189 */ 2190 if (end == (pgoff_t)-1) 2191 *start = (pgoff_t)-1; 2192 else 2193 *start = end + 1; 2194 out: 2195 rcu_read_unlock(); 2196 2197 return ret; 2198 } 2199 2200 /** 2201 * find_get_pages_contig - gang contiguous pagecache lookup 2202 * @mapping: The address_space to search 2203 * @index: The starting page index 2204 * @nr_pages: The maximum number of pages 2205 * @pages: Where the resulting pages are placed 2206 * 2207 * find_get_pages_contig() works exactly like find_get_pages(), except 2208 * that the returned number of pages are guaranteed to be contiguous. 2209 * 2210 * Return: the number of pages which were found. 2211 */ 2212 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, 2213 unsigned int nr_pages, struct page **pages) 2214 { 2215 XA_STATE(xas, &mapping->i_pages, index); 2216 struct page *page; 2217 unsigned int ret = 0; 2218 2219 if (unlikely(!nr_pages)) 2220 return 0; 2221 2222 rcu_read_lock(); 2223 for (page = xas_load(&xas); page; page = xas_next(&xas)) { 2224 if (xas_retry(&xas, page)) 2225 continue; 2226 /* 2227 * If the entry has been swapped out, we can stop looking. 2228 * No current caller is looking for DAX entries. 2229 */ 2230 if (xa_is_value(page)) 2231 break; 2232 2233 if (!page_cache_get_speculative(page)) 2234 goto retry; 2235 2236 /* Has the page moved or been split? */ 2237 if (unlikely(page != xas_reload(&xas))) 2238 goto put_page; 2239 2240 pages[ret] = find_subpage(page, xas.xa_index); 2241 if (++ret == nr_pages) 2242 break; 2243 continue; 2244 put_page: 2245 put_page(page); 2246 retry: 2247 xas_reset(&xas); 2248 } 2249 rcu_read_unlock(); 2250 return ret; 2251 } 2252 EXPORT_SYMBOL(find_get_pages_contig); 2253 2254 /** 2255 * find_get_pages_range_tag - Find and return head pages matching @tag. 2256 * @mapping: the address_space to search 2257 * @index: the starting page index 2258 * @end: The final page index (inclusive) 2259 * @tag: the tag index 2260 * @nr_pages: the maximum number of pages 2261 * @pages: where the resulting pages are placed 2262 * 2263 * Like find_get_pages(), except we only return head pages which are tagged 2264 * with @tag. @index is updated to the index immediately after the last 2265 * page we return, ready for the next iteration. 2266 * 2267 * Return: the number of pages which were found. 2268 */ 2269 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, 2270 pgoff_t end, xa_mark_t tag, unsigned int nr_pages, 2271 struct page **pages) 2272 { 2273 XA_STATE(xas, &mapping->i_pages, *index); 2274 struct page *page; 2275 unsigned ret = 0; 2276 2277 if (unlikely(!nr_pages)) 2278 return 0; 2279 2280 rcu_read_lock(); 2281 while ((page = find_get_entry(&xas, end, tag))) { 2282 /* 2283 * Shadow entries should never be tagged, but this iteration 2284 * is lockless so there is a window for page reclaim to evict 2285 * a page we saw tagged. Skip over it. 2286 */ 2287 if (xa_is_value(page)) 2288 continue; 2289 2290 pages[ret] = page; 2291 if (++ret == nr_pages) { 2292 *index = page->index + thp_nr_pages(page); 2293 goto out; 2294 } 2295 } 2296 2297 /* 2298 * We come here when we got to @end. We take care to not overflow the 2299 * index @index as it confuses some of the callers. This breaks the 2300 * iteration when there is a page at index -1 but that is already 2301 * broken anyway. 
2302 */ 2303 if (end == (pgoff_t)-1) 2304 *index = (pgoff_t)-1; 2305 else 2306 *index = end + 1; 2307 out: 2308 rcu_read_unlock(); 2309 2310 return ret; 2311 } 2312 EXPORT_SYMBOL(find_get_pages_range_tag); 2313 2314 /* 2315 * CD/DVDs are error prone. When a medium error occurs, the driver may fail 2316 * a _large_ part of the i/o request. Imagine the worst scenario: 2317 * 2318 * ---R__________________________________________B__________ 2319 * ^ reading here ^ bad block(assume 4k) 2320 * 2321 * read(R) => miss => readahead(R...B) => media error => frustrating retries 2322 * => failing the whole request => read(R) => read(R+1) => 2323 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => 2324 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => 2325 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... 2326 * 2327 * It is going insane. Fix it by quickly scaling down the readahead size. 2328 */ 2329 static void shrink_readahead_size_eio(struct file_ra_state *ra) 2330 { 2331 ra->ra_pages /= 4; 2332 } 2333 2334 /* 2335 * filemap_get_read_batch - Get a batch of pages for read 2336 * 2337 * Get a batch of pages which represent a contiguous range of bytes 2338 * in the file. No tail pages will be returned. If @index is in the 2339 * middle of a THP, the entire THP will be returned. The last page in 2340 * the batch may have Readahead set or be not Uptodate so that the 2341 * caller can take the appropriate action. 2342 */ 2343 static void filemap_get_read_batch(struct address_space *mapping, 2344 pgoff_t index, pgoff_t max, struct pagevec *pvec) 2345 { 2346 XA_STATE(xas, &mapping->i_pages, index); 2347 struct page *head; 2348 2349 rcu_read_lock(); 2350 for (head = xas_load(&xas); head; head = xas_next(&xas)) { 2351 if (xas_retry(&xas, head)) 2352 continue; 2353 if (xas.xa_index > max || xa_is_value(head)) 2354 break; 2355 if (!page_cache_get_speculative(head)) 2356 goto retry; 2357 2358 /* Has the page moved or been split? */ 2359 if (unlikely(head != xas_reload(&xas))) 2360 goto put_page; 2361 2362 if (!pagevec_add(pvec, head)) 2363 break; 2364 if (!PageUptodate(head)) 2365 break; 2366 if (PageReadahead(head)) 2367 break; 2368 xas.xa_index = head->index + thp_nr_pages(head) - 1; 2369 xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK; 2370 continue; 2371 put_page: 2372 put_page(head); 2373 retry: 2374 xas_reset(&xas); 2375 } 2376 rcu_read_unlock(); 2377 } 2378 2379 static int filemap_read_page(struct file *file, struct address_space *mapping, 2380 struct page *page) 2381 { 2382 int error; 2383 2384 /* 2385 * A previous I/O error may have been due to temporary failures, 2386 * eg. multipath errors. PG_error will be set again if readpage 2387 * fails. 2388 */ 2389 ClearPageError(page); 2390 /* Start the actual read. The read will unlock the page. 
*/ 2391 error = mapping->a_ops->readpage(file, page); 2392 if (error) 2393 return error; 2394 2395 error = wait_on_page_locked_killable(page); 2396 if (error) 2397 return error; 2398 if (PageUptodate(page)) 2399 return 0; 2400 shrink_readahead_size_eio(&file->f_ra); 2401 return -EIO; 2402 } 2403 2404 static bool filemap_range_uptodate(struct address_space *mapping, 2405 loff_t pos, struct iov_iter *iter, struct page *page) 2406 { 2407 int count; 2408 2409 if (PageUptodate(page)) 2410 return true; 2411 /* pipes can't handle partially uptodate pages */ 2412 if (iov_iter_is_pipe(iter)) 2413 return false; 2414 if (!mapping->a_ops->is_partially_uptodate) 2415 return false; 2416 if (mapping->host->i_blkbits >= (PAGE_SHIFT + thp_order(page))) 2417 return false; 2418 2419 count = iter->count; 2420 if (page_offset(page) > pos) { 2421 count -= page_offset(page) - pos; 2422 pos = 0; 2423 } else { 2424 pos -= page_offset(page); 2425 } 2426 2427 return mapping->a_ops->is_partially_uptodate(page, pos, count); 2428 } 2429 2430 static int filemap_update_page(struct kiocb *iocb, 2431 struct address_space *mapping, struct iov_iter *iter, 2432 struct page *page) 2433 { 2434 struct folio *folio = page_folio(page); 2435 int error; 2436 2437 if (iocb->ki_flags & IOCB_NOWAIT) { 2438 if (!filemap_invalidate_trylock_shared(mapping)) 2439 return -EAGAIN; 2440 } else { 2441 filemap_invalidate_lock_shared(mapping); 2442 } 2443 2444 if (!folio_trylock(folio)) { 2445 error = -EAGAIN; 2446 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) 2447 goto unlock_mapping; 2448 if (!(iocb->ki_flags & IOCB_WAITQ)) { 2449 filemap_invalidate_unlock_shared(mapping); 2450 put_and_wait_on_page_locked(&folio->page, TASK_KILLABLE); 2451 return AOP_TRUNCATED_PAGE; 2452 } 2453 error = __folio_lock_async(folio, iocb->ki_waitq); 2454 if (error) 2455 goto unlock_mapping; 2456 } 2457 2458 error = AOP_TRUNCATED_PAGE; 2459 if (!folio->mapping) 2460 goto unlock; 2461 2462 error = 0; 2463 if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, &folio->page)) 2464 goto unlock; 2465 2466 error = -EAGAIN; 2467 if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ)) 2468 goto unlock; 2469 2470 error = filemap_read_page(iocb->ki_filp, mapping, &folio->page); 2471 goto unlock_mapping; 2472 unlock: 2473 folio_unlock(folio); 2474 unlock_mapping: 2475 filemap_invalidate_unlock_shared(mapping); 2476 if (error == AOP_TRUNCATED_PAGE) 2477 folio_put(folio); 2478 return error; 2479 } 2480 2481 static int filemap_create_page(struct file *file, 2482 struct address_space *mapping, pgoff_t index, 2483 struct pagevec *pvec) 2484 { 2485 struct page *page; 2486 int error; 2487 2488 page = page_cache_alloc(mapping); 2489 if (!page) 2490 return -ENOMEM; 2491 2492 /* 2493 * Protect against truncate / hole punch. Grabbing invalidate_lock here 2494 * assures we cannot instantiate and bring uptodate new pagecache pages 2495 * after evicting page cache during truncate and before actually 2496 * freeing blocks. Note that we could release invalidate_lock after 2497 * inserting the page into page cache as the locked page would then be 2498 * enough to synchronize with hole punching. But there are code paths 2499 * such as filemap_update_page() filling in partially uptodate pages or 2500 * ->readpages() that need to hold invalidate_lock while mapping blocks 2501 * for IO so let's hold the lock here as well to keep locking rules 2502 * simple. 
2503 */ 2504 filemap_invalidate_lock_shared(mapping); 2505 error = add_to_page_cache_lru(page, mapping, index, 2506 mapping_gfp_constraint(mapping, GFP_KERNEL)); 2507 if (error == -EEXIST) 2508 error = AOP_TRUNCATED_PAGE; 2509 if (error) 2510 goto error; 2511 2512 error = filemap_read_page(file, mapping, page); 2513 if (error) 2514 goto error; 2515 2516 filemap_invalidate_unlock_shared(mapping); 2517 pagevec_add(pvec, page); 2518 return 0; 2519 error: 2520 filemap_invalidate_unlock_shared(mapping); 2521 put_page(page); 2522 return error; 2523 } 2524 2525 static int filemap_readahead(struct kiocb *iocb, struct file *file, 2526 struct address_space *mapping, struct page *page, 2527 pgoff_t last_index) 2528 { 2529 if (iocb->ki_flags & IOCB_NOIO) 2530 return -EAGAIN; 2531 page_cache_async_readahead(mapping, &file->f_ra, file, page, 2532 page->index, last_index - page->index); 2533 return 0; 2534 } 2535 2536 static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter, 2537 struct pagevec *pvec) 2538 { 2539 struct file *filp = iocb->ki_filp; 2540 struct address_space *mapping = filp->f_mapping; 2541 struct file_ra_state *ra = &filp->f_ra; 2542 pgoff_t index = iocb->ki_pos >> PAGE_SHIFT; 2543 pgoff_t last_index; 2544 struct page *page; 2545 int err = 0; 2546 2547 last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE); 2548 retry: 2549 if (fatal_signal_pending(current)) 2550 return -EINTR; 2551 2552 filemap_get_read_batch(mapping, index, last_index, pvec); 2553 if (!pagevec_count(pvec)) { 2554 if (iocb->ki_flags & IOCB_NOIO) 2555 return -EAGAIN; 2556 page_cache_sync_readahead(mapping, ra, filp, index, 2557 last_index - index); 2558 filemap_get_read_batch(mapping, index, last_index, pvec); 2559 } 2560 if (!pagevec_count(pvec)) { 2561 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) 2562 return -EAGAIN; 2563 err = filemap_create_page(filp, mapping, 2564 iocb->ki_pos >> PAGE_SHIFT, pvec); 2565 if (err == AOP_TRUNCATED_PAGE) 2566 goto retry; 2567 return err; 2568 } 2569 2570 page = pvec->pages[pagevec_count(pvec) - 1]; 2571 if (PageReadahead(page)) { 2572 err = filemap_readahead(iocb, filp, mapping, page, last_index); 2573 if (err) 2574 goto err; 2575 } 2576 if (!PageUptodate(page)) { 2577 if ((iocb->ki_flags & IOCB_WAITQ) && pagevec_count(pvec) > 1) 2578 iocb->ki_flags |= IOCB_NOWAIT; 2579 err = filemap_update_page(iocb, mapping, iter, page); 2580 if (err) 2581 goto err; 2582 } 2583 2584 return 0; 2585 err: 2586 if (err < 0) 2587 put_page(page); 2588 if (likely(--pvec->nr)) 2589 return 0; 2590 if (err == AOP_TRUNCATED_PAGE) 2591 goto retry; 2592 return err; 2593 } 2594 2595 /** 2596 * filemap_read - Read data from the page cache. 2597 * @iocb: The iocb to read. 2598 * @iter: Destination for the data. 2599 * @already_read: Number of bytes already read by the caller. 2600 * 2601 * Copies data from the page cache. If the data is not currently present, 2602 * uses the readahead and readpage address_space operations to fetch it. 2603 * 2604 * Return: Total number of bytes copied, including those already read by 2605 * the caller. If an error happens before any bytes are copied, returns 2606 * a negative error number. 
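 *
 * A minimal sketch of a ->read_iter() method that defers to the page
 * cache (hypothetical filesystem; most filesystems instead reach this
 * through generic_file_read_iter(), which also handles O_DIRECT):
 *
 *	static ssize_t myfs_file_read_iter(struct kiocb *iocb,
 *					   struct iov_iter *to)
 *	{
 *		return filemap_read(iocb, to, 0);
 *	}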
2607 */ 2608 ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter, 2609 ssize_t already_read) 2610 { 2611 struct file *filp = iocb->ki_filp; 2612 struct file_ra_state *ra = &filp->f_ra; 2613 struct address_space *mapping = filp->f_mapping; 2614 struct inode *inode = mapping->host; 2615 struct pagevec pvec; 2616 int i, error = 0; 2617 bool writably_mapped; 2618 loff_t isize, end_offset; 2619 2620 if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes)) 2621 return 0; 2622 if (unlikely(!iov_iter_count(iter))) 2623 return 0; 2624 2625 iov_iter_truncate(iter, inode->i_sb->s_maxbytes); 2626 pagevec_init(&pvec); 2627 2628 do { 2629 cond_resched(); 2630 2631 /* 2632 * If we've already successfully copied some data, then we 2633 * can no longer safely return -EIOCBQUEUED. Hence mark 2634 * an async read NOWAIT at that point. 2635 */ 2636 if ((iocb->ki_flags & IOCB_WAITQ) && already_read) 2637 iocb->ki_flags |= IOCB_NOWAIT; 2638 2639 if (unlikely(iocb->ki_pos >= i_size_read(inode))) 2640 break; 2641 2642 error = filemap_get_pages(iocb, iter, &pvec); 2643 if (error < 0) 2644 break; 2645 2646 /* 2647 * i_size must be checked after we know the pages are Uptodate. 2648 * 2649 * Checking i_size after the check allows us to calculate 2650 * the correct value for "nr", which means the zero-filled 2651 * part of the page is not copied back to userspace (unless 2652 * another truncate extends the file - this is desired though). 2653 */ 2654 isize = i_size_read(inode); 2655 if (unlikely(iocb->ki_pos >= isize)) 2656 goto put_pages; 2657 end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count); 2658 2659 /* 2660 * Once we start copying data, we don't want to be touching any 2661 * cachelines that might be contended: 2662 */ 2663 writably_mapped = mapping_writably_mapped(mapping); 2664 2665 /* 2666 * When a sequential read accesses a page several times, only 2667 * mark it as accessed the first time. 2668 */ 2669 if (iocb->ki_pos >> PAGE_SHIFT != 2670 ra->prev_pos >> PAGE_SHIFT) 2671 mark_page_accessed(pvec.pages[0]); 2672 2673 for (i = 0; i < pagevec_count(&pvec); i++) { 2674 struct page *page = pvec.pages[i]; 2675 size_t page_size = thp_size(page); 2676 size_t offset = iocb->ki_pos & (page_size - 1); 2677 size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos, 2678 page_size - offset); 2679 size_t copied; 2680 2681 if (end_offset < page_offset(page)) 2682 break; 2683 if (i > 0) 2684 mark_page_accessed(page); 2685 /* 2686 * If users can be writing to this page using arbitrary 2687 * virtual addresses, take care about potential aliasing 2688 * before reading the page on the kernel side. 2689 */ 2690 if (writably_mapped) { 2691 int j; 2692 2693 for (j = 0; j < thp_nr_pages(page); j++) 2694 flush_dcache_page(page + j); 2695 } 2696 2697 copied = copy_page_to_iter(page, offset, bytes, iter); 2698 2699 already_read += copied; 2700 iocb->ki_pos += copied; 2701 ra->prev_pos = iocb->ki_pos; 2702 2703 if (copied < bytes) { 2704 error = -EFAULT; 2705 break; 2706 } 2707 } 2708 put_pages: 2709 for (i = 0; i < pagevec_count(&pvec); i++) 2710 put_page(pvec.pages[i]); 2711 pagevec_reinit(&pvec); 2712 } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error); 2713 2714 file_accessed(filp); 2715 2716 return already_read ? 
already_read : error; 2717 } 2718 EXPORT_SYMBOL_GPL(filemap_read); 2719 2720 /** 2721 * generic_file_read_iter - generic filesystem read routine 2722 * @iocb: kernel I/O control block 2723 * @iter: destination for the data read 2724 * 2725 * This is the "read_iter()" routine for all filesystems 2726 * that can use the page cache directly. 2727 * 2728 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall 2729 * be returned when no data can be read without waiting for I/O requests 2730 * to complete; it doesn't prevent readahead. 2731 * 2732 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O 2733 * requests shall be made for the read or for readahead. When no data 2734 * can be read, -EAGAIN shall be returned. When readahead would be 2735 * triggered, a partial, possibly empty read shall be returned. 2736 * 2737 * Return: 2738 * * number of bytes copied, even for partial reads 2739 * * negative error code (or 0 if IOCB_NOIO) if nothing was read 2740 */ 2741 ssize_t 2742 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 2743 { 2744 size_t count = iov_iter_count(iter); 2745 ssize_t retval = 0; 2746 2747 if (!count) 2748 return 0; /* skip atime */ 2749 2750 if (iocb->ki_flags & IOCB_DIRECT) { 2751 struct file *file = iocb->ki_filp; 2752 struct address_space *mapping = file->f_mapping; 2753 struct inode *inode = mapping->host; 2754 2755 if (iocb->ki_flags & IOCB_NOWAIT) { 2756 if (filemap_range_needs_writeback(mapping, iocb->ki_pos, 2757 iocb->ki_pos + count - 1)) 2758 return -EAGAIN; 2759 } else { 2760 retval = filemap_write_and_wait_range(mapping, 2761 iocb->ki_pos, 2762 iocb->ki_pos + count - 1); 2763 if (retval < 0) 2764 return retval; 2765 } 2766 2767 file_accessed(file); 2768 2769 retval = mapping->a_ops->direct_IO(iocb, iter); 2770 if (retval >= 0) { 2771 iocb->ki_pos += retval; 2772 count -= retval; 2773 } 2774 if (retval != -EIOCBQUEUED) 2775 iov_iter_revert(iter, count - iov_iter_count(iter)); 2776 2777 /* 2778 * Btrfs can have a short DIO read if we encounter 2779 * compressed extents, so if there was an error, or if 2780 * we've already read everything we wanted to, or if 2781 * there was a short read because we hit EOF, go ahead 2782 * and return. Otherwise fallthrough to buffered io for 2783 * the rest of the read. Buffered reads will not work for 2784 * DAX files, so don't bother trying. 2785 */ 2786 if (retval < 0 || !count || IS_DAX(inode)) 2787 return retval; 2788 if (iocb->ki_pos >= i_size_read(inode)) 2789 return retval; 2790 } 2791 2792 return filemap_read(iocb, iter, retval); 2793 } 2794 EXPORT_SYMBOL(generic_file_read_iter); 2795 2796 static inline loff_t page_seek_hole_data(struct xa_state *xas, 2797 struct address_space *mapping, struct page *page, 2798 loff_t start, loff_t end, bool seek_data) 2799 { 2800 const struct address_space_operations *ops = mapping->a_ops; 2801 size_t offset, bsz = i_blocksize(mapping->host); 2802 2803 if (xa_is_value(page) || PageUptodate(page)) 2804 return seek_data ? start : end; 2805 if (!ops->is_partially_uptodate) 2806 return seek_data ? 
end : start; 2807 2808 xas_pause(xas); 2809 rcu_read_unlock(); 2810 lock_page(page); 2811 if (unlikely(page->mapping != mapping)) 2812 goto unlock; 2813 2814 offset = offset_in_thp(page, start) & ~(bsz - 1); 2815 2816 do { 2817 if (ops->is_partially_uptodate(page, offset, bsz) == seek_data) 2818 break; 2819 start = (start + bsz) & ~(bsz - 1); 2820 offset += bsz; 2821 } while (offset < thp_size(page)); 2822 unlock: 2823 unlock_page(page); 2824 rcu_read_lock(); 2825 return start; 2826 } 2827 2828 static inline 2829 unsigned int seek_page_size(struct xa_state *xas, struct page *page) 2830 { 2831 if (xa_is_value(page)) 2832 return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index); 2833 return thp_size(page); 2834 } 2835 2836 /** 2837 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache. 2838 * @mapping: Address space to search. 2839 * @start: First byte to consider. 2840 * @end: Limit of search (exclusive). 2841 * @whence: Either SEEK_HOLE or SEEK_DATA. 2842 * 2843 * If the page cache knows which blocks contain holes and which blocks 2844 * contain data, your filesystem can use this function to implement 2845 * SEEK_HOLE and SEEK_DATA. This is useful for filesystems which are 2846 * entirely memory-based such as tmpfs, and filesystems which support 2847 * unwritten extents. 2848 * 2849 * Return: The requested offset on success, or -ENXIO if @whence specifies 2850 * SEEK_DATA and there is no data after @start. There is an implicit hole 2851 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start 2852 * and @end contain data. 2853 */ 2854 loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, 2855 loff_t end, int whence) 2856 { 2857 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); 2858 pgoff_t max = (end - 1) >> PAGE_SHIFT; 2859 bool seek_data = (whence == SEEK_DATA); 2860 struct page *page; 2861 2862 if (end <= start) 2863 return -ENXIO; 2864 2865 rcu_read_lock(); 2866 while ((page = find_get_entry(&xas, max, XA_PRESENT))) { 2867 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT; 2868 unsigned int seek_size; 2869 2870 if (start < pos) { 2871 if (!seek_data) 2872 goto unlock; 2873 start = pos; 2874 } 2875 2876 seek_size = seek_page_size(&xas, page); 2877 pos = round_up(pos + 1, seek_size); 2878 start = page_seek_hole_data(&xas, mapping, page, start, pos, 2879 seek_data); 2880 if (start < pos) 2881 goto unlock; 2882 if (start >= end) 2883 break; 2884 if (seek_size > PAGE_SIZE) 2885 xas_set(&xas, pos >> PAGE_SHIFT); 2886 if (!xa_is_value(page)) 2887 put_page(page); 2888 } 2889 if (seek_data) 2890 start = -ENXIO; 2891 unlock: 2892 rcu_read_unlock(); 2893 if (page && !xa_is_value(page)) 2894 put_page(page); 2895 if (start > end) 2896 return end; 2897 return start; 2898 } 2899 2900 #ifdef CONFIG_MMU 2901 #define MMAP_LOTSAMISS (100) 2902 /* 2903 * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock 2904 * @vmf - the vm_fault for this fault. 2905 * @page - the page to lock. 2906 * @fpin - the pointer to the file we may pin (or is already pinned). 2907 * 2908 * This works similar to lock_page_or_retry in that it can drop the mmap_lock. 2909 * It differs in that it actually returns the page locked if it returns 1 and 0 2910 * if it couldn't lock the page. If we did have to drop the mmap_lock then fpin 2911 * will point to the pinned file and needs to be fput()'ed at a later point. 
2912 */ 2913 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, 2914 struct file **fpin) 2915 { 2916 struct folio *folio = page_folio(page); 2917 2918 if (folio_trylock(folio)) 2919 return 1; 2920 2921 /* 2922 * NOTE! This will make us return with VM_FAULT_RETRY, but with 2923 * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT 2924 * is supposed to work. We have way too many special cases.. 2925 */ 2926 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) 2927 return 0; 2928 2929 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); 2930 if (vmf->flags & FAULT_FLAG_KILLABLE) { 2931 if (__folio_lock_killable(folio)) { 2932 /* 2933 * We didn't have the right flags to drop the mmap_lock, 2934 * but all fault_handlers only check for fatal signals 2935 * if we return VM_FAULT_RETRY, so we need to drop the 2936 * mmap_lock here and return 0 if we don't have a fpin. 2937 */ 2938 if (*fpin == NULL) 2939 mmap_read_unlock(vmf->vma->vm_mm); 2940 return 0; 2941 } 2942 } else 2943 __folio_lock(folio); 2944 2945 return 1; 2946 } 2947 2948 /* 2949 * Synchronous readahead happens when we don't even find a page in the page 2950 * cache at all. We don't want to perform IO under the mmap sem, so if we have 2951 * to drop the mmap sem we return the file that was pinned in order for us to do 2952 * that. If we didn't pin a file then we return NULL. The file that is 2953 * returned needs to be fput()'ed when we're done with it. 2954 */ 2955 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) 2956 { 2957 struct file *file = vmf->vma->vm_file; 2958 struct file_ra_state *ra = &file->f_ra; 2959 struct address_space *mapping = file->f_mapping; 2960 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); 2961 struct file *fpin = NULL; 2962 unsigned int mmap_miss; 2963 2964 /* If we don't want any read-ahead, don't bother */ 2965 if (vmf->vma->vm_flags & VM_RAND_READ) 2966 return fpin; 2967 if (!ra->ra_pages) 2968 return fpin; 2969 2970 if (vmf->vma->vm_flags & VM_SEQ_READ) { 2971 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 2972 page_cache_sync_ra(&ractl, ra->ra_pages); 2973 return fpin; 2974 } 2975 2976 /* Avoid banging the cache line if not needed */ 2977 mmap_miss = READ_ONCE(ra->mmap_miss); 2978 if (mmap_miss < MMAP_LOTSAMISS * 10) 2979 WRITE_ONCE(ra->mmap_miss, ++mmap_miss); 2980 2981 /* 2982 * Do we miss much more than hit in this file? If so, 2983 * stop bothering with read-ahead. It will only hurt. 2984 */ 2985 if (mmap_miss > MMAP_LOTSAMISS) 2986 return fpin; 2987 2988 /* 2989 * mmap read-around 2990 */ 2991 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 2992 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); 2993 ra->size = ra->ra_pages; 2994 ra->async_size = ra->ra_pages / 4; 2995 ractl._index = ra->start; 2996 do_page_cache_ra(&ractl, ra->size, ra->async_size); 2997 return fpin; 2998 } 2999 3000 /* 3001 * Asynchronous readahead happens when we find the page and PG_readahead, 3002 * so we want to possibly extend the readahead further. We return the file that 3003 * was pinned if we have to drop the mmap_lock in order to do IO. 
3004 */ 3005 static struct file *do_async_mmap_readahead(struct vm_fault *vmf, 3006 struct page *page) 3007 { 3008 struct file *file = vmf->vma->vm_file; 3009 struct file_ra_state *ra = &file->f_ra; 3010 struct address_space *mapping = file->f_mapping; 3011 struct file *fpin = NULL; 3012 unsigned int mmap_miss; 3013 pgoff_t offset = vmf->pgoff; 3014 3015 /* If we don't want any read-ahead, don't bother */ 3016 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) 3017 return fpin; 3018 mmap_miss = READ_ONCE(ra->mmap_miss); 3019 if (mmap_miss) 3020 WRITE_ONCE(ra->mmap_miss, --mmap_miss); 3021 if (PageReadahead(page)) { 3022 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3023 page_cache_async_readahead(mapping, ra, file, 3024 page, offset, ra->ra_pages); 3025 } 3026 return fpin; 3027 } 3028 3029 /** 3030 * filemap_fault - read in file data for page fault handling 3031 * @vmf: struct vm_fault containing details of the fault 3032 * 3033 * filemap_fault() is invoked via the vma operations vector for a 3034 * mapped memory region to read in file data during a page fault. 3035 * 3036 * The goto's are kind of ugly, but this streamlines the normal case of having 3037 * it in the page cache, and handles the special cases reasonably without 3038 * having a lot of duplicated code. 3039 * 3040 * vma->vm_mm->mmap_lock must be held on entry. 3041 * 3042 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock 3043 * may be dropped before doing I/O or by lock_page_maybe_drop_mmap(). 3044 * 3045 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock 3046 * has not been released. 3047 * 3048 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. 3049 * 3050 * Return: bitwise-OR of %VM_FAULT_ codes. 3051 */ 3052 vm_fault_t filemap_fault(struct vm_fault *vmf) 3053 { 3054 int error; 3055 struct file *file = vmf->vma->vm_file; 3056 struct file *fpin = NULL; 3057 struct address_space *mapping = file->f_mapping; 3058 struct inode *inode = mapping->host; 3059 pgoff_t offset = vmf->pgoff; 3060 pgoff_t max_off; 3061 struct page *page; 3062 vm_fault_t ret = 0; 3063 bool mapping_locked = false; 3064 3065 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3066 if (unlikely(offset >= max_off)) 3067 return VM_FAULT_SIGBUS; 3068 3069 /* 3070 * Do we have something in the page cache already? 3071 */ 3072 page = find_get_page(mapping, offset); 3073 if (likely(page)) { 3074 /* 3075 * We found the page, so try async readahead before waiting for 3076 * the lock. 3077 */ 3078 if (!(vmf->flags & FAULT_FLAG_TRIED)) 3079 fpin = do_async_mmap_readahead(vmf, page); 3080 if (unlikely(!PageUptodate(page))) { 3081 filemap_invalidate_lock_shared(mapping); 3082 mapping_locked = true; 3083 } 3084 } else { 3085 /* No page in the page cache at all */ 3086 count_vm_event(PGMAJFAULT); 3087 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); 3088 ret = VM_FAULT_MAJOR; 3089 fpin = do_sync_mmap_readahead(vmf); 3090 retry_find: 3091 /* 3092 * See comment in filemap_create_page() why we need 3093 * invalidate_lock 3094 */ 3095 if (!mapping_locked) { 3096 filemap_invalidate_lock_shared(mapping); 3097 mapping_locked = true; 3098 } 3099 page = pagecache_get_page(mapping, offset, 3100 FGP_CREAT|FGP_FOR_MMAP, 3101 vmf->gfp_mask); 3102 if (!page) { 3103 if (fpin) 3104 goto out_retry; 3105 filemap_invalidate_unlock_shared(mapping); 3106 return VM_FAULT_OOM; 3107 } 3108 } 3109 3110 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) 3111 goto out_retry; 3112 3113 /* Did it get truncated? 
*/ 3114 if (unlikely(compound_head(page)->mapping != mapping)) { 3115 unlock_page(page); 3116 put_page(page); 3117 goto retry_find; 3118 } 3119 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); 3120 3121 /* 3122 * We have a locked page in the page cache, now we need to check 3123 * that it's up-to-date. If not, it is going to be due to an error. 3124 */ 3125 if (unlikely(!PageUptodate(page))) { 3126 /* 3127 * The page was in cache and uptodate and now it is not. 3128 * Strange but possible since we didn't hold the page lock all 3129 * the time. Let's drop everything get the invalidate lock and 3130 * try again. 3131 */ 3132 if (!mapping_locked) { 3133 unlock_page(page); 3134 put_page(page); 3135 goto retry_find; 3136 } 3137 goto page_not_uptodate; 3138 } 3139 3140 /* 3141 * We've made it this far and we had to drop our mmap_lock, now is the 3142 * time to return to the upper layer and have it re-find the vma and 3143 * redo the fault. 3144 */ 3145 if (fpin) { 3146 unlock_page(page); 3147 goto out_retry; 3148 } 3149 if (mapping_locked) 3150 filemap_invalidate_unlock_shared(mapping); 3151 3152 /* 3153 * Found the page and have a reference on it. 3154 * We must recheck i_size under page lock. 3155 */ 3156 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3157 if (unlikely(offset >= max_off)) { 3158 unlock_page(page); 3159 put_page(page); 3160 return VM_FAULT_SIGBUS; 3161 } 3162 3163 vmf->page = page; 3164 return ret | VM_FAULT_LOCKED; 3165 3166 page_not_uptodate: 3167 /* 3168 * Umm, take care of errors if the page isn't up-to-date. 3169 * Try to re-read it _once_. We do this synchronously, 3170 * because there really aren't any performance issues here 3171 * and we need to check for errors. 3172 */ 3173 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3174 error = filemap_read_page(file, mapping, page); 3175 if (fpin) 3176 goto out_retry; 3177 put_page(page); 3178 3179 if (!error || error == AOP_TRUNCATED_PAGE) 3180 goto retry_find; 3181 filemap_invalidate_unlock_shared(mapping); 3182 3183 return VM_FAULT_SIGBUS; 3184 3185 out_retry: 3186 /* 3187 * We dropped the mmap_lock, we need to return to the fault handler to 3188 * re-find the vma and come back and find our hopefully still populated 3189 * page. 3190 */ 3191 if (page) 3192 put_page(page); 3193 if (mapping_locked) 3194 filemap_invalidate_unlock_shared(mapping); 3195 if (fpin) 3196 fput(fpin); 3197 return ret | VM_FAULT_RETRY; 3198 } 3199 EXPORT_SYMBOL(filemap_fault); 3200 3201 static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page) 3202 { 3203 struct mm_struct *mm = vmf->vma->vm_mm; 3204 3205 /* Huge page is mapped? No need to proceed. */ 3206 if (pmd_trans_huge(*vmf->pmd)) { 3207 unlock_page(page); 3208 put_page(page); 3209 return true; 3210 } 3211 3212 if (pmd_none(*vmf->pmd) && PageTransHuge(page)) { 3213 vm_fault_t ret = do_set_pmd(vmf, page); 3214 if (!ret) { 3215 /* The page is mapped successfully, reference consumed. 
*/ 3216 unlock_page(page); 3217 return true; 3218 } 3219 } 3220 3221 if (pmd_none(*vmf->pmd)) 3222 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte); 3223 3224 /* See comment in handle_pte_fault() */ 3225 if (pmd_devmap_trans_unstable(vmf->pmd)) { 3226 unlock_page(page); 3227 put_page(page); 3228 return true; 3229 } 3230 3231 return false; 3232 } 3233 3234 static struct page *next_uptodate_page(struct page *page, 3235 struct address_space *mapping, 3236 struct xa_state *xas, pgoff_t end_pgoff) 3237 { 3238 unsigned long max_idx; 3239 3240 do { 3241 if (!page) 3242 return NULL; 3243 if (xas_retry(xas, page)) 3244 continue; 3245 if (xa_is_value(page)) 3246 continue; 3247 if (PageLocked(page)) 3248 continue; 3249 if (!page_cache_get_speculative(page)) 3250 continue; 3251 /* Has the page moved or been split? */ 3252 if (unlikely(page != xas_reload(xas))) 3253 goto skip; 3254 if (!PageUptodate(page) || PageReadahead(page)) 3255 goto skip; 3256 if (PageHWPoison(page)) 3257 goto skip; 3258 if (!trylock_page(page)) 3259 goto skip; 3260 if (page->mapping != mapping) 3261 goto unlock; 3262 if (!PageUptodate(page)) 3263 goto unlock; 3264 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 3265 if (xas->xa_index >= max_idx) 3266 goto unlock; 3267 return page; 3268 unlock: 3269 unlock_page(page); 3270 skip: 3271 put_page(page); 3272 } while ((page = xas_next_entry(xas, end_pgoff)) != NULL); 3273 3274 return NULL; 3275 } 3276 3277 static inline struct page *first_map_page(struct address_space *mapping, 3278 struct xa_state *xas, 3279 pgoff_t end_pgoff) 3280 { 3281 return next_uptodate_page(xas_find(xas, end_pgoff), 3282 mapping, xas, end_pgoff); 3283 } 3284 3285 static inline struct page *next_map_page(struct address_space *mapping, 3286 struct xa_state *xas, 3287 pgoff_t end_pgoff) 3288 { 3289 return next_uptodate_page(xas_next_entry(xas, end_pgoff), 3290 mapping, xas, end_pgoff); 3291 } 3292 3293 vm_fault_t filemap_map_pages(struct vm_fault *vmf, 3294 pgoff_t start_pgoff, pgoff_t end_pgoff) 3295 { 3296 struct vm_area_struct *vma = vmf->vma; 3297 struct file *file = vma->vm_file; 3298 struct address_space *mapping = file->f_mapping; 3299 pgoff_t last_pgoff = start_pgoff; 3300 unsigned long addr; 3301 XA_STATE(xas, &mapping->i_pages, start_pgoff); 3302 struct page *head, *page; 3303 unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss); 3304 vm_fault_t ret = 0; 3305 3306 rcu_read_lock(); 3307 head = first_map_page(mapping, &xas, end_pgoff); 3308 if (!head) 3309 goto out; 3310 3311 if (filemap_map_pmd(vmf, head)) { 3312 ret = VM_FAULT_NOPAGE; 3313 goto out; 3314 } 3315 3316 addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT); 3317 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); 3318 do { 3319 page = find_subpage(head, xas.xa_index); 3320 if (PageHWPoison(page)) 3321 goto unlock; 3322 3323 if (mmap_miss > 0) 3324 mmap_miss--; 3325 3326 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT; 3327 vmf->pte += xas.xa_index - last_pgoff; 3328 last_pgoff = xas.xa_index; 3329 3330 if (!pte_none(*vmf->pte)) 3331 goto unlock; 3332 3333 /* We're about to handle the fault */ 3334 if (vmf->address == addr) 3335 ret = VM_FAULT_NOPAGE; 3336 3337 do_set_pte(vmf, page, addr); 3338 /* no need to invalidate: a not-present page won't be cached */ 3339 update_mmu_cache(vma, addr, vmf->pte); 3340 unlock_page(head); 3341 continue; 3342 unlock: 3343 unlock_page(head); 3344 put_page(head); 3345 } while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL); 3346 
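	/* Batch fully handled: unmap the PTE and drop the PTE lock. */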
pte_unmap_unlock(vmf->pte, vmf->ptl); 3347 out: 3348 rcu_read_unlock(); 3349 WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss); 3350 return ret; 3351 } 3352 EXPORT_SYMBOL(filemap_map_pages); 3353 3354 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) 3355 { 3356 struct address_space *mapping = vmf->vma->vm_file->f_mapping; 3357 struct page *page = vmf->page; 3358 vm_fault_t ret = VM_FAULT_LOCKED; 3359 3360 sb_start_pagefault(mapping->host->i_sb); 3361 file_update_time(vmf->vma->vm_file); 3362 lock_page(page); 3363 if (page->mapping != mapping) { 3364 unlock_page(page); 3365 ret = VM_FAULT_NOPAGE; 3366 goto out; 3367 } 3368 /* 3369 * We mark the page dirty already here so that when freeze is in 3370 * progress, we are guaranteed that writeback during freezing will 3371 * see the dirty page and writeprotect it again. 3372 */ 3373 set_page_dirty(page); 3374 wait_for_stable_page(page); 3375 out: 3376 sb_end_pagefault(mapping->host->i_sb); 3377 return ret; 3378 } 3379 3380 const struct vm_operations_struct generic_file_vm_ops = { 3381 .fault = filemap_fault, 3382 .map_pages = filemap_map_pages, 3383 .page_mkwrite = filemap_page_mkwrite, 3384 }; 3385 3386 /* This is used for a general mmap of a disk file */ 3387 3388 int generic_file_mmap(struct file *file, struct vm_area_struct *vma) 3389 { 3390 struct address_space *mapping = file->f_mapping; 3391 3392 if (!mapping->a_ops->readpage) 3393 return -ENOEXEC; 3394 file_accessed(file); 3395 vma->vm_ops = &generic_file_vm_ops; 3396 return 0; 3397 } 3398 3399 /* 3400 * This is for filesystems which do not implement ->writepage. 3401 */ 3402 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 3403 { 3404 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 3405 return -EINVAL; 3406 return generic_file_mmap(file, vma); 3407 } 3408 #else 3409 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) 3410 { 3411 return VM_FAULT_SIGBUS; 3412 } 3413 int generic_file_mmap(struct file *file, struct vm_area_struct *vma) 3414 { 3415 return -ENOSYS; 3416 } 3417 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 3418 { 3419 return -ENOSYS; 3420 } 3421 #endif /* CONFIG_MMU */ 3422 3423 EXPORT_SYMBOL(filemap_page_mkwrite); 3424 EXPORT_SYMBOL(generic_file_mmap); 3425 EXPORT_SYMBOL(generic_file_readonly_mmap); 3426 3427 static struct page *wait_on_page_read(struct page *page) 3428 { 3429 if (!IS_ERR(page)) { 3430 wait_on_page_locked(page); 3431 if (!PageUptodate(page)) { 3432 put_page(page); 3433 page = ERR_PTR(-EIO); 3434 } 3435 } 3436 return page; 3437 } 3438 3439 static struct page *do_read_cache_page(struct address_space *mapping, 3440 pgoff_t index, 3441 int (*filler)(void *, struct page *), 3442 void *data, 3443 gfp_t gfp) 3444 { 3445 struct page *page; 3446 int err; 3447 repeat: 3448 page = find_get_page(mapping, index); 3449 if (!page) { 3450 page = __page_cache_alloc(gfp); 3451 if (!page) 3452 return ERR_PTR(-ENOMEM); 3453 err = add_to_page_cache_lru(page, mapping, index, gfp); 3454 if (unlikely(err)) { 3455 put_page(page); 3456 if (err == -EEXIST) 3457 goto repeat; 3458 /* Presumably ENOMEM for xarray node */ 3459 return ERR_PTR(err); 3460 } 3461 3462 filler: 3463 if (filler) 3464 err = filler(data, page); 3465 else 3466 err = mapping->a_ops->readpage(data, page); 3467 3468 if (err < 0) { 3469 put_page(page); 3470 return ERR_PTR(err); 3471 } 3472 3473 page = wait_on_page_read(page); 3474 if (IS_ERR(page)) 3475 return page; 3476 goto out; 3477 } 3478 if (PageUptodate(page)) 3479 goto out; 3480 3481 
/* 3482 * Page is not up to date and may be locked due to one of the following 3483 * case a: Page is being filled and the page lock is held 3484 * case b: Read/write error clearing the page uptodate status 3485 * case c: Truncation in progress (page locked) 3486 * case d: Reclaim in progress 3487 * 3488 * Case a, the page will be up to date when the page is unlocked. 3489 * There is no need to serialise on the page lock here as the page 3490 * is pinned so the lock gives no additional protection. Even if the 3491 * page is truncated, the data is still valid if PageUptodate as 3492 * it's a race vs truncate race. 3493 * Case b, the page will not be up to date 3494 * Case c, the page may be truncated but in itself, the data may still 3495 * be valid after IO completes as it's a read vs truncate race. The 3496 * operation must restart if the page is not uptodate on unlock but 3497 * otherwise serialising on page lock to stabilise the mapping gives 3498 * no additional guarantees to the caller as the page lock is 3499 * released before return. 3500 * Case d, similar to truncation. If reclaim holds the page lock, it 3501 * will be a race with remove_mapping that determines if the mapping 3502 * is valid on unlock but otherwise the data is valid and there is 3503 * no need to serialise with page lock. 3504 * 3505 * As the page lock gives no additional guarantee, we optimistically 3506 * wait on the page to be unlocked and check if it's up to date and 3507 * use the page if it is. Otherwise, the page lock is required to 3508 * distinguish between the different cases. The motivation is that we 3509 * avoid spurious serialisations and wakeups when multiple processes 3510 * wait on the same page for IO to complete. 3511 */ 3512 wait_on_page_locked(page); 3513 if (PageUptodate(page)) 3514 goto out; 3515 3516 /* Distinguish between all the cases under the safety of the lock */ 3517 lock_page(page); 3518 3519 /* Case c or d, restart the operation */ 3520 if (!page->mapping) { 3521 unlock_page(page); 3522 put_page(page); 3523 goto repeat; 3524 } 3525 3526 /* Someone else locked and filled the page in a very small window */ 3527 if (PageUptodate(page)) { 3528 unlock_page(page); 3529 goto out; 3530 } 3531 3532 /* 3533 * A previous I/O error may have been due to temporary 3534 * failures. 3535 * Clear page error before actual read, PG_error will be 3536 * set again if read page fails. 3537 */ 3538 ClearPageError(page); 3539 goto filler; 3540 3541 out: 3542 mark_page_accessed(page); 3543 return page; 3544 } 3545 3546 /** 3547 * read_cache_page - read into page cache, fill it if needed 3548 * @mapping: the page's address_space 3549 * @index: the page index 3550 * @filler: function to perform the read 3551 * @data: first arg to filler(data, page) function, often left as NULL 3552 * 3553 * Read into the page cache. If a page already exists, and PageUptodate() is 3554 * not set, try to fill the page and wait for it to become unlocked. 3555 * 3556 * If the page does not get brought uptodate, return -EIO. 3557 * 3558 * The function expects mapping->invalidate_lock to be already held. 3559 * 3560 * Return: up to date page on success, ERR_PTR() on failure. 
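 *
 * A minimal usage sketch (hypothetical caller; a NULL @filler means the
 * mapping's own ->readpage is used, with @data passed as its file
 * argument):
 *
 *	page = read_cache_page(mapping, index, NULL, file);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... page is uptodate and referenced here ...
 *	put_page(page);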
3561 */ 3562 struct page *read_cache_page(struct address_space *mapping, 3563 pgoff_t index, 3564 int (*filler)(void *, struct page *), 3565 void *data) 3566 { 3567 return do_read_cache_page(mapping, index, filler, data, 3568 mapping_gfp_mask(mapping)); 3569 } 3570 EXPORT_SYMBOL(read_cache_page); 3571 3572 /** 3573 * read_cache_page_gfp - read into page cache, using specified page allocation flags. 3574 * @mapping: the page's address_space 3575 * @index: the page index 3576 * @gfp: the page allocator flags to use if allocating 3577 * 3578 * This is the same as "read_mapping_page(mapping, index, NULL)", but with 3579 * any new page allocations done using the specified allocation flags. 3580 * 3581 * If the page does not get brought uptodate, return -EIO. 3582 * 3583 * The function expects mapping->invalidate_lock to be already held. 3584 * 3585 * Return: up to date page on success, ERR_PTR() on failure. 3586 */ 3587 struct page *read_cache_page_gfp(struct address_space *mapping, 3588 pgoff_t index, 3589 gfp_t gfp) 3590 { 3591 return do_read_cache_page(mapping, index, NULL, NULL, gfp); 3592 } 3593 EXPORT_SYMBOL(read_cache_page_gfp); 3594 3595 int pagecache_write_begin(struct file *file, struct address_space *mapping, 3596 loff_t pos, unsigned len, unsigned flags, 3597 struct page **pagep, void **fsdata) 3598 { 3599 const struct address_space_operations *aops = mapping->a_ops; 3600 3601 return aops->write_begin(file, mapping, pos, len, flags, 3602 pagep, fsdata); 3603 } 3604 EXPORT_SYMBOL(pagecache_write_begin); 3605 3606 int pagecache_write_end(struct file *file, struct address_space *mapping, 3607 loff_t pos, unsigned len, unsigned copied, 3608 struct page *page, void *fsdata) 3609 { 3610 const struct address_space_operations *aops = mapping->a_ops; 3611 3612 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); 3613 } 3614 EXPORT_SYMBOL(pagecache_write_end); 3615 3616 /* 3617 * Warn about a page cache invalidation failure during a direct I/O write. 3618 */ 3619 void dio_warn_stale_pagecache(struct file *filp) 3620 { 3621 static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST); 3622 char pathname[128]; 3623 char *path; 3624 3625 errseq_set(&filp->f_mapping->wb_err, -EIO); 3626 if (__ratelimit(&_rs)) { 3627 path = file_path(filp, pathname, sizeof(pathname)); 3628 if (IS_ERR(path)) 3629 path = "(unknown)"; 3630 pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n"); 3631 pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid, 3632 current->comm); 3633 } 3634 } 3635 3636 ssize_t 3637 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) 3638 { 3639 struct file *file = iocb->ki_filp; 3640 struct address_space *mapping = file->f_mapping; 3641 struct inode *inode = mapping->host; 3642 loff_t pos = iocb->ki_pos; 3643 ssize_t written; 3644 size_t write_len; 3645 pgoff_t end; 3646 3647 write_len = iov_iter_count(from); 3648 end = (pos + write_len - 1) >> PAGE_SHIFT; 3649 3650 if (iocb->ki_flags & IOCB_NOWAIT) { 3651 /* If there are pages to writeback, return */ 3652 if (filemap_range_has_page(file->f_mapping, pos, 3653 pos + write_len - 1)) 3654 return -EAGAIN; 3655 } else { 3656 written = filemap_write_and_wait_range(mapping, pos, 3657 pos + write_len - 1); 3658 if (written) 3659 goto out; 3660 } 3661 3662 /* 3663 * After a write we want buffered reads to be sure to go to disk to get 3664 * the new data. We invalidate clean cached page from the region we're 3665 * about to write. 

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);

/*
 * Warn about a page cache invalidation failure during a direct I/O write.
 */
void dio_warn_stale_pagecache(struct file *filp)
{
	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
	char pathname[128];
	char *path;

	errseq_set(&filp->f_mapping->wb_err, -EIO);
	if (__ratelimit(&_rs)) {
		path = file_path(filp, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
			current->comm);
	}
}

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos;
	ssize_t written;
	size_t write_len;
	pgoff_t end;

	write_len = iov_iter_count(from);
	end = (pos + write_len - 1) >> PAGE_SHIFT;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* If there are pages to write back, return */
		if (filemap_range_has_page(file->f_mapping, pos,
					   pos + write_len - 1))
			return -EAGAIN;
	} else {
		written = filemap_write_and_wait_range(mapping, pos,
						       pos + write_len - 1);
		if (written)
			goto out;
	}

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached pages from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_SHIFT, end);
	/*
	 * If a page cannot be invalidated, return 0 to fall back
	 * to a buffered write.
	 */
	if (written) {
		if (written == -EBUSY)
			return 0;
		goto out;
	}

	written = mapping->a_ops->direct_IO(iocb, from);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 *
	 * Most of the time we do not need this since dio_complete() will do
	 * the invalidation for us. However there are some file systems that
	 * do not end up with dio_complete() being called, so let's not break
	 * them by removing it completely.
	 *
	 * A notable example is blkdev_direct_IO().
	 *
	 * Skip invalidation for async writes or if mapping has no pages.
	 */
	if (written > 0 && mapping->nrpages &&
	    invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end))
		dio_warn_stale_pagecache(file);

	if (written > 0) {
		pos += written;
		write_len -= written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, write_len - iov_iter_count(from));
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status != copied)) {
			iov_iter_revert(i, copied - max(status, 0L));
			if (unlikely(status < 0))
				break;
		}
		cond_resched();

		if (unlikely(status == 0)) {
			/*
			 * A short copy made ->write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(i));

	return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);
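
/*
 * Illustrative sketch (editor's addition, not part of filemap.c):
 * generic_perform_write() drives the filesystem's ->write_begin() and
 * ->write_end() methods.  A simple page-cache-backed filesystem can point
 * both at the libfs helpers, much as ramfs does; "examplefs_aops" is a
 * hypothetical name.
 */
static const struct address_space_operations examplefs_aops = {
	.readpage	= simple_readpage,
	.write_begin	= simple_write_begin,
	.write_end	= simple_write_end,
	.set_page_dirty	= __set_page_dirty_no_writeback,
};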

/**
 * __generic_file_write_iter - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_rwsem to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in the case of an O_SYNC
 * write. A caller has to handle it. This is mainly due to the fact that we
 * want to avoid syncing under i_rwsem.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written = 0;
	ssize_t err;
	ssize_t status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes.  Some filesystems do this for writes to
		 * holes, for example.  For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero.  Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);

/**
 * generic_file_write_iter - write data to a file
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in the case of an O_SYNC
 * write and acquires i_rwsem as needed. (A brief usage sketch appears after
 * this function.)
 * Return:
 * * negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * number of bytes written, even for truncated writes
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
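
/*
 * Illustrative sketch (editor's addition, not part of filemap.c): most
 * filesystems consume the write path above simply by wiring the generic
 * helpers into their file_operations, much as ramfs does.  The name
 * "examplefs_file_operations" is hypothetical.
 */
static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= noop_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};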

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page:	the page which the kernel is trying to free
 * @gfp_mask:	memory allocation flags (and I/O mode)
 *
 * The address_space is asked to release any fs-private data held against
 * the page (presumably at page->private).
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
 *
 * Return: %1 if the release was successful, otherwise return zero.
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
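
/*
 * Illustrative sketch (editor's addition, not part of filemap.c): a
 * buffer-head based filesystem typically supplies ->releasepage(), which
 * try_to_release_page() above will call.  The common pattern is to refuse
 * while fs-private state is still pending and otherwise hand the page to
 * the buffer layer.  The function name and the PageChecked() test are
 * illustrative only.
 */
static int examplefs_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* fs-private state still pending on this page; it cannot go yet */
	if (PageChecked(page))
		return 0;
	return try_to_free_buffers(page);
}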