// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/error-injection.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
#include <linux/ramfs.h>
#include <linux/page_idle.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->i_pages lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_lock		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->i_pages lock		(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->i_pages lock		(try_to_unmap_one)
 *    ->pgdat->lru_lock		(follow_page->mark_page_accessed)
 *    ->pgdat->lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->i_pages lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 *  ->i_mmap_rwsem
 *    ->tasklist_lock		(memory_failure, collect_procs_ao)
 */
static void page_cache_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, page->index);
	unsigned int nr = 1;

	mapping_set_update(&xas, mapping);

	/* hugetlb pages are represented by a single entry in the xarray */
	if (!PageHuge(page)) {
		xas_set_order(&xas, page->index, compound_order(page));
		nr = compound_nr(page);
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(nr != 1 && shadow, page);

	xas_store(&xas, shadow);
	xas_init_marks(&xas);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	if (shadow) {
		mapping->nrexceptional += nr;
		/*
		 * Make sure the nrexceptional update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages -= nr;
}

static void unaccount_page_cache_page(struct address_space *mapping,
				      struct page *page)
{
	int nr;

	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapped(page), page);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
		int mapcount;

		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
			 current->comm, page_to_pfn(page));
		dump_page(page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		mapcount = page_mapcount(page);
		if (mapping_exiting(mapping) &&
		    page_count(page) >= mapcount + 2) {
			/*
			 * All vmas have already been torn down, so it's
			 * a good bet that actually the page is unmapped,
			 * and we'd prefer not to leak it: if we're wrong,
			 * some other bad page check should catch it later.
			 */
			page_mapcount_reset(page);
			page_ref_sub(page, mapcount);
		}
	}

	/* hugetlb pages do not participate in page cache accounting. */
	if (PageHuge(page))
		return;

	nr = thp_nr_pages(page);

	__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
	if (PageSwapBacked(page)) {
		__mod_lruvec_page_state(page, NR_SHMEM, -nr);
		if (PageTransHuge(page))
			__dec_node_page_state(page, NR_SHMEM_THPS);
	} else if (PageTransHuge(page)) {
		__dec_node_page_state(page, NR_FILE_THPS);
		filemap_nr_thps_dec(mapping);
	}

	/*
	 * At this point page must be either written or cleaned by
	 * truncate.  Dirty page here signals a bug and loss of
	 * unwritten data.
	 *
	 * This fixes dirty accounting after removing the page entirely
	 * but leaves PageDirty set: it has no effect for truncated
	 * page and anyway will be cleared before returning page into
	 * buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the i_pages lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);

	unaccount_page_cache_page(mapping, page);
	page_cache_delete(mapping, page, shadow);
}

static void page_cache_free_page(struct address_space *mapping,
				struct page *page)
{
	void (*freepage)(struct page *);

	freepage = mapping->a_ops->freepage;
	if (freepage)
		freepage(page);

	if (PageTransHuge(page) && !PageHuge(page)) {
		page_ref_sub(page, thp_nr_pages(page));
		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
	} else {
		put_page(page);
	}
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	BUG_ON(!PageLocked(page));
	xa_lock_irqsave(&mapping->i_pages, flags);
	__delete_from_page_cache(page, NULL);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	page_cache_free_page(mapping, page);
}
EXPORT_SYMBOL(delete_from_page_cache);
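
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * an invalidation-style caller must hold both the page lock and its own
 * reference before calling delete_from_page_cache(), e.g.:
 *
 *	if (trylock_page(page)) {
 *		if (page->mapping == mapping && !PageDirty(page) &&
 *		    !PageWriteback(page))
 *			delete_from_page_cache(page);
 *		unlock_page(page);
 *	}
 *
 * The page->mapping re-check is needed because the page may have been
 * truncated while we waited for the lock.
 */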
/*
 * page_cache_delete_batch - delete several pages from page cache
 * @mapping: the mapping to which pages belong
 * @pvec: pagevec with pages to delete
 *
 * The function walks over mapping->i_pages and removes pages passed in @pvec
 * from the mapping. The function expects @pvec to be sorted by page index
 * and is optimised for it to be dense.
 * It tolerates holes in @pvec (mapping entries at those indices are not
 * modified). The function expects only THP head pages to be present in the
 * @pvec.
 *
 * The function expects the i_pages lock to be held.
 */
static void page_cache_delete_batch(struct address_space *mapping,
			     struct pagevec *pvec)
{
	XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
	int total_pages = 0;
	int i = 0;
	struct page *page;

	mapping_set_update(&xas, mapping);
	xas_for_each(&xas, page, ULONG_MAX) {
		if (i >= pagevec_count(pvec))
			break;

		/* A swap/dax/shadow entry got inserted? Skip it. */
		if (xa_is_value(page))
			continue;
		/*
		 * A page got inserted in our range? Skip it. We have our
		 * pages locked so they are protected from being removed.
		 * If we see a page whose index is higher than ours, it
		 * means our page has been removed, which shouldn't be
		 * possible because we're holding the PageLock.
		 */
		if (page != pvec->pages[i]) {
			VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
					page);
			continue;
		}

		WARN_ON_ONCE(!PageLocked(page));

		if (page->index == xas.xa_index)
			page->mapping = NULL;
		/* Leave page->index set: truncation lookup relies on it */

		/*
		 * Move to the next page in the vector if this is a regular
		 * page or the index is of the last sub-page of this compound
		 * page.
		 */
		if (page->index + compound_nr(page) - 1 == xas.xa_index)
			i++;
		xas_store(&xas, NULL);
		total_pages++;
	}
	mapping->nrpages -= total_pages;
}

void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec)
{
	int i;
	unsigned long flags;

	if (!pagevec_count(pvec))
		return;

	xa_lock_irqsave(&mapping->i_pages, flags);
	for (i = 0; i < pagevec_count(pvec); i++) {
		trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);

		unaccount_page_cache_page(mapping, pvec->pages[i]);
	}
	page_cache_delete_batch(mapping, pvec);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	for (i = 0; i < pagevec_count(pvec); i++)
		page_cache_free_page(mapping, pvec->pages[i]);
}
int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
	/* Check for outstanding write errors */
	if (test_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
	return 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_can_writeback(mapping) ||
	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
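
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the WB_SYNC_* choice above is what separates "start I/O opportunistically"
 * from "start I/O that a data-integrity sync will wait on".  A caller that
 * wants to push a byte range out before a direct I/O read might do:
 *
 *	err = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
 *	if (err)
 *		return err;
 *
 * (pos and count are hypothetical values here), whereas background cleaning
 * that must not block would use filemap_flush(mapping) and tolerate pages
 * being skipped.
 */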
/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:		address space within which to check
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
			   loff_t start_byte, loff_t end_byte)
{
	struct page *page;
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	for (;;) {
		page = xas_find(&xas, max);
		if (xas_retry(&xas, page))
			continue;
		/* Shadow entries don't count */
		if (xa_is_value(page))
			continue;
		/*
		 * We don't need to try to pin this page; we're about to
		 * release the RCU lock anyway.  It is enough to know that
		 * there was a page here recently.
		 */
		break;
	}
	rcu_read_unlock();

	return page != NULL;
}
EXPORT_SYMBOL(filemap_range_has_page);
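
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a direct I/O write path can use this to decide whether it must flush
 * cached pages before issuing the I/O:
 *
 *	if (filemap_range_has_page(mapping, pos, pos + count - 1)) {
 *		err = filemap_write_and_wait_range(mapping, pos,
 *						   pos + count - 1);
 *		if (err)
 *			return err;
 *	}
 *
 * Because the check is made under RCU without pinning the page, it is only
 * a hint: a page may appear or disappear immediately afterwards.
 */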
static void __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	int nr_pages;

	if (end_byte < start_byte)
		return;

	pagevec_init(&pvec);
	while (index <= end) {
		unsigned i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
				end, PAGECACHE_TAG_WRITEBACK);
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			wait_on_page_writeback(page);
			ClearPageError(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space in the
 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 * this function does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file:		file pointing to address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them.  Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
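
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a filesystem-wide flusher such as a sync_fs implementation pairs the
 * "keep errors" variant with a write-out pass, leaving AS_EIO/AS_ENOSPC
 * set so that a later fsync on an individual file can still observe them:
 *
 *	filemap_fdatawrite(mapping);
 *	err = filemap_fdatawait_keep_errors(mapping);
 *
 * Per-file syncs, by contrast, use filemap_fdatawait_range() or the
 * errseq-based file_fdatawait_range(), which consume the error.
 */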
/* Returns true if writeback might be needed or already in progress. */
static bool mapping_needs_writeback(struct address_space *mapping)
{
	if (dax_mapping(mapping))
		return mapping->nrexceptional;

	return mapping->nrpages;
}

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		} else {
			/* Clear any previously stored errors */
			filemap_check_errors(mapping);
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
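
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * this is the classic building block for a simple ->fsync() method.  A
 * filesystem with no metadata of its own to flush could do roughly:
 *
 *	static int example_fsync(struct file *file, loff_t start, loff_t end,
 *				 int datasync)
 *	{
 *		return filemap_write_and_wait_range(file->f_mapping,
 *						    start, end);
 *	}
 *
 * ("example_fsync" is a hypothetical name; real implementations usually
 * also flush journal or metadata state before returning.)
 */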
void __filemap_set_wb_err(struct address_space *mapping, int err)
{
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);

/**
 * file_check_and_advance_wb_err - report wb error (if any) that was previously
 *				   seen, and advance wb_err to the current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (à la fsync, or the NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
	int err = 0;
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	/* Locklessly handle the common case where nothing has changed */
	if (errseq_check(&mapping->wb_err, old)) {
		/* Something changed, must use slow path */
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		trace_file_check_and_advance_wb_err(file, old);
		spin_unlock(&file->f_lock);
	}

	/*
	 * We're mostly using this function as a drop-in replacement for
	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
	 * that the legacy code would have had on these flags.
	 */
	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);
	return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);
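
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * callers that cannot keep a struct file around can sample and check the
 * errseq_t cursor themselves with the pagemap.h helpers:
 *
 *	errseq_t since = filemap_sample_wb_err(mapping);
 *	... issue writeback and wait for it ...
 *	err = filemap_check_wb_err(mapping, since);
 *
 * This reports errors that occurred after the sample point without
 * consuming them for other observers, which is how callers such as nfsd
 * track write errors across otherwise unrelated syscalls.
 */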
/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file:	file pointing to address_space with pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
	int err = 0, err2;
	struct address_space *mapping = file->f_mapping;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = file_check_and_advance_wb_err(file);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  This function cannot fail.
 *
 * Return: %0
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	struct address_space *mapping = old->mapping;
	void (*freepage)(struct page *) = mapping->a_ops->freepage;
	pgoff_t offset = old->index;
	XA_STATE(xas, &mapping->i_pages, offset);
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	get_page(new);
	new->mapping = mapping;
	new->index = offset;

	mem_cgroup_migrate(old, new);

	xas_lock_irqsave(&xas, flags);
	xas_store(&xas, new);

	old->mapping = NULL;
	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(old))
		__dec_lruvec_page_state(old, NR_FILE_PAGES);
	if (!PageHuge(new))
		__inc_lruvec_page_state(new, NR_FILE_PAGES);
	if (PageSwapBacked(old))
		__dec_lruvec_page_state(old, NR_SHMEM);
	if (PageSwapBacked(new))
		__inc_lruvec_page_state(new, NR_SHMEM);
	xas_unlock_irqrestore(&xas, flags);
	if (freepage)
		freepage(old);
	put_page(old);

	return 0;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
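
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a typical caller (FUSE, for instance, when moving a page from a pipe
 * into the page cache) holds both pages locked and then puts the new
 * page on the LRU itself:
 *
 *	replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
 *	lru_cache_add(newpage);
 *
 * (Locking of both pages and the surrounding error handling are elided.)
 */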
static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp,
				      void **shadowp)
{
	XA_STATE(xas, &mapping->i_pages, offset);
	int huge = PageHuge(page);
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
	mapping_set_update(&xas, mapping);

	get_page(page);
	page->mapping = mapping;
	page->index = offset;

	if (!huge) {
		error = mem_cgroup_charge(page, current->mm, gfp);
		if (error)
			goto error;
	}

	gfp &= GFP_RECLAIM_MASK;

	do {
		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
		void *entry, *old = NULL;

		if (order > thp_order(page))
			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
					order, gfp);
		xas_lock_irq(&xas);
		xas_for_each_conflict(&xas, entry) {
			old = entry;
			if (!xa_is_value(entry)) {
				xas_set_err(&xas, -EEXIST);
				goto unlock;
			}
		}

		if (old) {
			if (shadowp)
				*shadowp = old;
			/* entry may have been split before we acquired lock */
			order = xa_get_order(xas.xa, xas.xa_index);
			if (order > thp_order(page)) {
				xas_split(&xas, old, order);
				xas_reset(&xas);
			}
		}

		xas_store(&xas, page);
		if (xas_error(&xas))
			goto unlock;

		if (old)
			mapping->nrexceptional--;
		mapping->nrpages++;

		/* hugetlb pages do not participate in page cache accounting */
		if (!huge)
			__inc_lruvec_page_state(page, NR_FILE_PAGES);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	trace_mm_filemap_add_to_page_cache(page);
	return 0;
error:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	put_page(page);
	return error;
}
ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__SetPageLocked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__ClearPageLocked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 * The exception is pages getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		WARN_ON_ONCE(PageActive(page));
		if (!(gfp_mask & __GFP_WRITE) && shadow)
			workingset_refault(page, shadow);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
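
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the common buffered-read pattern allocates a fresh page, inserts it
 * locked, and then asks the filesystem to fill it:
 *
 *	struct page *page = __page_cache_alloc(gfp_mask);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
 *	if (!err)
 *		err = mapping->a_ops->readpage(file, page);
 *	else if (err == -EEXIST)
 *		err = 0;	(someone else inserted the page first)
 *	put_page(page);
 *
 * ->readpage is expected to unlock the page once I/O completes.
 */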
#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *page_waitqueue(struct page *page)
{
	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&page_wait_table[i]);

	page_writeback_init();
}
/*
 * The page wait code treats the "wait->flags" somewhat unusually, because
 * we have multiple different kinds of waits, not just the usual "exclusive"
 * one.
 *
 * We have:
 *
 *  (a) no special bits set:
 *
 *	We're just waiting for the bit to be released, and when a waker
 *	calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
 *	and remove it from the wait queue.
 *
 *	Simple and straightforward.
 *
 *  (b) WQ_FLAG_EXCLUSIVE:
 *
 *	The waiter is waiting to get the lock, and only one waiter should
 *	be woken up to avoid any thundering herd behavior. We'll set the
 *	WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
 *
 *	This is the traditional exclusive wait.
 *
 *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
 *
 *	The waiter is waiting to get the bit, and additionally wants the
 *	lock to be transferred to it for fair lock behavior. If the lock
 *	cannot be taken, we stop walking the wait queue without waking
 *	the waiter.
 *
 *	This is the "fair lock handoff" case, and in addition to setting
 *	WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
 *	that it now has the lock.
 */
static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
	unsigned int flags;
	struct wait_page_key *key = arg;
	struct wait_page_queue *wait_page
		= container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wait_page, key))
		return 0;

	/*
	 * If it's a lock handoff wait, we get the bit for it, and
	 * stop walking (and do not wake it up) if we can't.
	 */
	flags = wait->flags;
	if (flags & WQ_FLAG_EXCLUSIVE) {
		if (test_bit(key->bit_nr, &key->page->flags))
			return -1;
		if (flags & WQ_FLAG_CUSTOM) {
			if (test_and_set_bit(key->bit_nr, &key->page->flags))
				return -1;
			flags |= WQ_FLAG_DONE;
		}
	}

	/*
	 * We are holding the wait-queue lock, but the waiter that
	 * is waiting for this will be checking the flags without
	 * any locking.
	 *
	 * So update the flags atomically, and wake up the waiter
	 * afterwards to avoid any races. This store-release pairs
	 * with the load-acquire in wait_on_page_bit_common().
	 */
	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
	wake_up_state(wait->private, mode);

	/*
	 * Ok, we have successfully done what we're waiting for,
	 * and we can unconditionally remove the wait entry.
	 *
	 * Note that this pairs with the "finish_wait()" in the
	 * waiter, and has to be the absolute last thing we do.
	 * After this list_del_init(&wait->entry) the wait entry
	 * might be de-allocated and the process might even have
	 * exited.
	 */
	list_del_init_careful(&wait->entry);
	return (flags & WQ_FLAG_EXCLUSIVE) != 0;
}

static void wake_up_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	struct wait_page_key key;
	unsigned long flags;
	wait_queue_entry_t bookmark;

	key.page = page;
	key.bit_nr = bit_nr;
	key.page_match = 0;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);

	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		/*
		 * Take a breather from holding the lock,
		 * allow pages that finish wake up asynchronously
		 * to acquire the lock and remove themselves
		 * from wait queue
		 */
		spin_unlock_irqrestore(&q->lock, flags);
		cpu_relax();
		spin_lock_irqsave(&q->lock, flags);
		__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
	}

	/*
	 * It is possible for other pages to have collided on the waitqueue
	 * hash, so in that case check for a page match. That prevents a
	 * long-term waiter from having PageWaiters cleared underneath it
	 * while it is still queued, and thus never being woken.
	 *
	 * It is still possible to miss a case here, when we woke page waiters
	 * and removed them from the waitqueue, but there are still other
	 * page waiters.
	 */
	if (!waitqueue_active(q) || !key.page_match) {
		ClearPageWaiters(page);
		/*
		 * It's possible to miss clearing Waiters here, when we woke
		 * our page waiters, but the hashed waitqueue has waiters for
		 * other pages on it.
		 *
		 * That's okay, it's a rare case. The next waker will clear it.
		 */
	}
	spin_unlock_irqrestore(&q->lock, flags);
}

static void wake_up_page(struct page *page, int bit)
{
	if (!PageWaiters(page))
		return;
	wake_up_page_bit(page, bit);
}
/*
 * A choice of three behaviors for wait_on_page_bit_common():
 */
enum behavior {
	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
			 * __lock_page() waiting on then setting PG_locked.
			 */
	SHARED,		/* Hold ref to page and check the bit when woken, like
			 * wait_on_page_writeback() waiting on PG_writeback.
			 */
	DROP,		/* Drop ref to page before wait, no check when woken,
			 * like put_and_wait_on_page_locked() on PG_locked.
			 */
};

/*
 * Attempt to check (or get) the page bit, and mark us done
 * if successful.
 */
static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
					struct wait_queue_entry *wait)
{
	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
		if (test_and_set_bit(bit_nr, &page->flags))
			return false;
	} else if (test_bit(bit_nr, &page->flags))
		return false;

	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
	return true;
}

/* How many times do we accept lock stealing from under a waiter? */
int sysctl_page_lock_unfairness = 5;
static inline int wait_on_page_bit_common(wait_queue_head_t *q,
	struct page *page, int bit_nr, int state, enum behavior behavior)
{
	int unfairness = sysctl_page_lock_unfairness;
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	bool thrashing = false;
	bool delayacct = false;
	unsigned long pflags;

	if (bit_nr == PG_locked &&
	    !PageUptodate(page) && PageWorkingset(page)) {
		if (!PageSwapBacked(page)) {
			delayacct_thrashing_start();
			delayacct = true;
		}
		psi_memstall_enter(&pflags);
		thrashing = true;
	}

	init_wait(wait);
	wait->func = wake_page_function;
	wait_page.page = page;
	wait_page.bit_nr = bit_nr;

repeat:
	wait->flags = 0;
	if (behavior == EXCLUSIVE) {
		wait->flags = WQ_FLAG_EXCLUSIVE;
		if (--unfairness < 0)
			wait->flags |= WQ_FLAG_CUSTOM;
	}

	/*
	 * Do one last check whether we can get the
	 * page bit synchronously.
	 *
	 * Do the SetPageWaiters() marking before that
	 * to let any waker we _just_ missed know they
	 * need to wake us up (otherwise they'll never
	 * even go to the slow case that looks at the
	 * page queue), and add ourselves to the wait
	 * queue if we need to sleep.
	 *
	 * This part needs to be done under the queue
	 * lock to avoid races.
	 */
	spin_lock_irq(&q->lock);
	SetPageWaiters(page);
	if (!trylock_page_bit_common(page, bit_nr, wait))
		__add_wait_queue_entry_tail(q, wait);
	spin_unlock_irq(&q->lock);

	/*
	 * From now on, all the logic will be based on
	 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
	 * see whether the page bit testing has already
	 * been done by the wake function.
	 *
	 * We can drop our reference to the page.
	 */
	if (behavior == DROP)
		put_page(page);

	/*
	 * Note that until the "finish_wait()", or until
	 * we see the WQ_FLAG_WOKEN flag, we need to
	 * be very careful with the 'wait->flags', because
	 * we may race with a waker that sets them.
	 */
	for (;;) {
		unsigned int flags;

		set_current_state(state);

		/* Loop until we've been woken or interrupted */
		flags = smp_load_acquire(&wait->flags);
		if (!(flags & WQ_FLAG_WOKEN)) {
			if (signal_pending_state(state, current))
				break;

			io_schedule();
			continue;
		}

		/* If we were non-exclusive, we're done */
		if (behavior != EXCLUSIVE)
			break;

		/* If the waker got the lock for us, we're done */
		if (flags & WQ_FLAG_DONE)
			break;

		/*
		 * Otherwise, if we're getting the lock, we need to
		 * try to get it ourselves.
		 *
		 * And if that fails, we'll have to retry this all.
		 */
		if (unlikely(test_and_set_bit(bit_nr, &page->flags)))
			goto repeat;

		wait->flags |= WQ_FLAG_DONE;
		break;
	}

	/*
	 * If a signal happened, this 'finish_wait()' may remove the last
	 * waiter from the wait-queues, but the PageWaiters bit will remain
	 * set. That's ok. The next wakeup will take care of it, and trying
	 * to do it here would be difficult and prone to races.
	 */
	finish_wait(q, wait);

	if (thrashing) {
		if (delayacct)
			delayacct_thrashing_end();
		psi_memstall_leave(&pflags);
	}

	/*
	 * NOTE! The wait->flags weren't stable until we've done the
	 * 'finish_wait()', and we could have exited the loop above due
	 * to a signal, and had a wakeup event happen after the signal
	 * test but before the 'finish_wait()'.
	 *
	 * So only after the finish_wait() can we reliably determine
	 * if we got woken up or not, so we can now figure out the final
	 * return value based on that state without races.
	 *
	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
	 * waiter, but an exclusive one requires WQ_FLAG_DONE.
	 */
	if (behavior == EXCLUSIVE)
		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;

	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
}
EXPORT_SYMBOL(wait_on_page_bit);
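
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the familiar helpers in include/linux/pagemap.h are thin wrappers that
 * test the bit first and only fall back to the hashed waitqueue, roughly:
 *
 *	static inline void wait_on_page_locked(struct page *page)
 *	{
 *		if (PageLocked(page))
 *			wait_on_page_bit(compound_head(page), PG_locked);
 *	}
 *
 * so the common uncontended case never touches page_wait_table[] at all.
 */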
int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
}
EXPORT_SYMBOL(wait_on_page_bit_killable);

static int __wait_on_page_locked_async(struct page *page,
				       struct wait_page_queue *wait, bool set)
{
	struct wait_queue_head *q = page_waitqueue(page);
	int ret = 0;

	wait->page = page;
	wait->bit_nr = PG_locked;

	spin_lock_irq(&q->lock);
	__add_wait_queue_entry_tail(q, &wait->wait);
	SetPageWaiters(page);
	if (set)
		ret = !trylock_page(page);
	else
		ret = PageLocked(page);
	/*
	 * If we were successful now, we know we're still on the
	 * waitqueue as we're still under the lock. This means it's
	 * safe to remove and return success, we know the callback
	 * isn't going to trigger.
	 */
	if (!ret)
		__remove_wait_queue(q, &wait->wait);
	else
		ret = -EIOCBQUEUED;
	spin_unlock_irq(&q->lock);
	return ret;
}

static int wait_on_page_locked_async(struct page *page,
				     struct wait_page_queue *wait)
{
	if (!PageLocked(page))
		return 0;
	return __wait_on_page_locked_async(compound_head(page), wait, false);
}

/**
 * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
 * @page: The page to wait for.
 *
 * The caller should hold a reference on @page.  They expect the page to
 * become unlocked relatively soon, but do not wish to hold up migration
 * (for example) by holding the reference while waiting for the page to
 * come unlocked.  After this function returns, the caller should not
 * dereference @page.
 */
void put_and_wait_on_page_locked(struct page *page)
{
	wait_queue_head_t *q;

	page = compound_head(page);
	q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP);
}
/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_entry_tail(q, waiter);
	SetPageWaiters(page);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

#ifndef clear_bit_unlock_is_negative_byte

/*
 * PG_waiters is the high bit in the same byte as PG_locked.
 *
 * On x86 (and on many other architectures), we can clear PG_locked and
 * test the sign bit at the same time. But if the architecture does
 * not support that special operation, we just do this all by hand
 * instead.
 *
 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 * being cleared, but a memory barrier should be unnecessary since it is
 * in the same byte as PG_locked.
 */
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
	clear_bit_unlock(nr, mem);
	/* smp_mb__after_atomic(); */
	return test_bit(PG_waiters, mem);
}

#endif

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * Note that this depends on PG_waiters being the sign bit in the byte
 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
 * clear the PG_locked bit and test PG_waiters at the same time fairly
 * portably (architectures that do LL/SC can test any bit, while x86 can
 * test the sign bit).
 */
void unlock_page(struct page *page)
{
	BUILD_BUG_ON(PG_waiters != 7);
	page = compound_head(page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
		wake_up_page_bit(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, bool is_write, int err)
{
	if (!is_write) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else {
		if (err) {
			struct address_space *mapping;

			SetPageError(page);
			mapping = page_mapping(page);
			if (mapping)
				mapping_set_error(mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);
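
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a block-backed filesystem typically calls page_endio() from its bio
 * completion handler, in the style of fs/mpage.c:
 *
 *	static void example_end_io(struct bio *bio)
 *	{
 *		struct bio_vec *bv;
 *		struct bvec_iter_all iter_all;
 *
 *		bio_for_each_segment_all(bv, bio, iter_all)
 *			page_endio(bv->bv_page, bio_op(bio) == REQ_OP_WRITE,
 *				   blk_status_to_errno(bio->bi_status));
 *		bio_put(bio);
 *	}
 *
 * ("example_end_io" is a hypothetical name.)
 */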
/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @__page: the page to lock
 */
void __lock_page(struct page *__page)
{
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
				EXCLUSIVE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *__page)
{
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);
	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
					EXCLUSIVE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

int __lock_page_async(struct page *page, struct wait_page_queue *wait)
{
	return __wait_on_page_locked_async(page, wait, true);
}

/*
 * Return values:
 * 1 - page is locked; mmap_lock is still held.
 * 0 - page is not locked.
 *     mmap_lock has been released (mmap_read_unlock()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_lock is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_lock unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (fault_flag_allow_retry_first(flags)) {
		/*
		 * CAUTION! In this case, mmap_lock is not released
		 * even though we return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		mmap_read_unlock(mm);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				mmap_read_unlock(mm);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_miss() - Find the next gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
 * gap with the lowest index.
 *
 * This function may be called under the rcu_read_lock.  However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 5, then subsequently a gap is
 * created at index 10, page_cache_next_miss() covering both indices may
 * return 10 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'return - index >= max_scan' will be true).
 * In the rare case of index wrap-around, 0 will be returned.
 */
pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
		void *entry = xas_next(&xas);
		if (!entry || xa_is_value(entry))
			break;
		if (xas.xa_index == 0)
			break;
	}

	return xas.xa_index;
}
EXPORT_SYMBOL(page_cache_next_miss);

/**
 * page_cache_prev_miss() - Find the previous gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [max(index - max_scan + 1, 0), index] for the
 * gap with the highest index.
 *
 * This function may be called under the rcu_read_lock.  However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 10, then subsequently a gap is
 * created at index 5, page_cache_prev_miss() covering both indices may
 * return 5 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'index - return >= max_scan' will be true).
 * In the rare case of wrap-around, ULONG_MAX will be returned.
 */
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
		void *entry = xas_prev(&xas);
		if (!entry || xa_is_value(entry))
			break;
		if (xas.xa_index == ULONG_MAX)
			break;
	}

	return xas.xa_index;
}
EXPORT_SYMBOL(page_cache_prev_miss);
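
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * readahead-style code can size a contiguous cached run by probing for
 * the next hole:
 *
 *	pgoff_t gap = page_cache_next_miss(mapping, index, max_pages);
 *
 *	if (gap - index >= max_pages)
 *		return;			(no hole in the next max_pages pages)
 *	nr_to_read = gap - index;	(read only up to the first hole)
 *
 * remembering that the answer is only a racy hint unless the caller holds
 * the relevant pages locked.
 */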
find_get_entries() takes a reference against any actual 1870 * pages it returns. 1871 * 1872 * The search returns a group of mapping-contiguous page cache entries 1873 * with ascending indexes. There may be holes in the indices due to 1874 * not-present pages. 1875 * 1876 * Any shadow entries of evicted pages, or swap entries from 1877 * shmem/tmpfs, are included in the returned array. 1878 * 1879 * If it finds a Transparent Huge Page, head or tail, find_get_entries() 1880 * stops at that page: the caller is likely to have a better way to handle 1881 * the compound page as a whole, and then skip its extent, than repeatedly 1882 * calling find_get_entries() to return all its tails. 1883 * 1884 * Return: the number of pages and shadow entries which were found. 1885 */ 1886 unsigned find_get_entries(struct address_space *mapping, 1887 pgoff_t start, unsigned int nr_entries, 1888 struct page **entries, pgoff_t *indices) 1889 { 1890 XA_STATE(xas, &mapping->i_pages, start); 1891 struct page *page; 1892 unsigned int ret = 0; 1893 1894 if (!nr_entries) 1895 return 0; 1896 1897 rcu_read_lock(); 1898 xas_for_each(&xas, page, ULONG_MAX) { 1899 if (xas_retry(&xas, page)) 1900 continue; 1901 /* 1902 * A shadow entry of a recently evicted page, a swap 1903 * entry from shmem/tmpfs or a DAX entry. Return it 1904 * without attempting to raise page count. 1905 */ 1906 if (xa_is_value(page)) 1907 goto export; 1908 1909 if (!page_cache_get_speculative(page)) 1910 goto retry; 1911 1912 /* Has the page moved or been split? */ 1913 if (unlikely(page != xas_reload(&xas))) 1914 goto put_page; 1915 1916 /* 1917 * Terminate early on finding a THP, to allow the caller to 1918 * handle it all at once; but continue if this is hugetlbfs. 1919 */ 1920 if (PageTransHuge(page) && !PageHuge(page)) { 1921 page = find_subpage(page, xas.xa_index); 1922 nr_entries = ret + 1; 1923 } 1924 export: 1925 indices[ret] = xas.xa_index; 1926 entries[ret] = page; 1927 if (++ret == nr_entries) 1928 break; 1929 continue; 1930 put_page: 1931 put_page(page); 1932 retry: 1933 xas_reset(&xas); 1934 } 1935 rcu_read_unlock(); 1936 return ret; 1937 } 1938 1939 /** 1940 * find_get_pages_range - gang pagecache lookup 1941 * @mapping: The address_space to search 1942 * @start: The starting page index 1943 * @end: The final page index (inclusive) 1944 * @nr_pages: The maximum number of pages 1945 * @pages: Where the resulting pages are placed 1946 * 1947 * find_get_pages_range() will search for and return a group of up to @nr_pages 1948 * pages in the mapping starting at index @start and up to index @end 1949 * (inclusive). The pages are placed at @pages. find_get_pages_range() takes 1950 * a reference against the returned pages. 1951 * 1952 * The search returns a group of mapping-contiguous pages with ascending 1953 * indexes. There may be holes in the indices due to not-present pages. 1954 * We also update @start to index the next page for the traversal. 1955 * 1956 * Return: the number of pages which were found. If this number is 1957 * smaller than @nr_pages, the end of specified range has been 1958 * reached. 
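 *
 * An illustrative caller sketch (not lifted from any in-tree user): loop
 * until a batch comes back short, dropping the reference taken on each
 * returned page.  Note that @start is advanced by the call itself:
 *
 *	pgoff_t index = 0;
 *	struct page *pages[PAGEVEC_SIZE];
 *	unsigned int i, nr;
 *
 *	while ((nr = find_get_pages_range(mapping, &index, end,
 *					  PAGEVEC_SIZE, pages))) {
 *		for (i = 0; i < nr; i++)
 *			put_page(pages[i]);
 *	}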
1959 */ 1960 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, 1961 pgoff_t end, unsigned int nr_pages, 1962 struct page **pages) 1963 { 1964 XA_STATE(xas, &mapping->i_pages, *start); 1965 struct page *page; 1966 unsigned ret = 0; 1967 1968 if (unlikely(!nr_pages)) 1969 return 0; 1970 1971 rcu_read_lock(); 1972 xas_for_each(&xas, page, end) { 1973 if (xas_retry(&xas, page)) 1974 continue; 1975 /* Skip over shadow, swap and DAX entries */ 1976 if (xa_is_value(page)) 1977 continue; 1978 1979 if (!page_cache_get_speculative(page)) 1980 goto retry; 1981 1982 /* Has the page moved or been split? */ 1983 if (unlikely(page != xas_reload(&xas))) 1984 goto put_page; 1985 1986 pages[ret] = find_subpage(page, xas.xa_index); 1987 if (++ret == nr_pages) { 1988 *start = xas.xa_index + 1; 1989 goto out; 1990 } 1991 continue; 1992 put_page: 1993 put_page(page); 1994 retry: 1995 xas_reset(&xas); 1996 } 1997 1998 /* 1999 * We come here when there is no page beyond @end. We take care to not 2000 * overflow the index @start as it confuses some of the callers. This 2001 * breaks the iteration when there is a page at index -1 but that is 2002 * already broken anyway. 2003 */ 2004 if (end == (pgoff_t)-1) 2005 *start = (pgoff_t)-1; 2006 else 2007 *start = end + 1; 2008 out: 2009 rcu_read_unlock(); 2010 2011 return ret; 2012 } 2013 2014 /** 2015 * find_get_pages_contig - gang contiguous pagecache lookup 2016 * @mapping: The address_space to search 2017 * @index: The starting page index 2018 * @nr_pages: The maximum number of pages 2019 * @pages: Where the resulting pages are placed 2020 * 2021 * find_get_pages_contig() works exactly like find_get_pages(), except 2022 * that the returned number of pages are guaranteed to be contiguous. 2023 * 2024 * Return: the number of pages which were found. 2025 */ 2026 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, 2027 unsigned int nr_pages, struct page **pages) 2028 { 2029 XA_STATE(xas, &mapping->i_pages, index); 2030 struct page *page; 2031 unsigned int ret = 0; 2032 2033 if (unlikely(!nr_pages)) 2034 return 0; 2035 2036 rcu_read_lock(); 2037 for (page = xas_load(&xas); page; page = xas_next(&xas)) { 2038 if (xas_retry(&xas, page)) 2039 continue; 2040 /* 2041 * If the entry has been swapped out, we can stop looking. 2042 * No current caller is looking for DAX entries. 2043 */ 2044 if (xa_is_value(page)) 2045 break; 2046 2047 if (!page_cache_get_speculative(page)) 2048 goto retry; 2049 2050 /* Has the page moved or been split? */ 2051 if (unlikely(page != xas_reload(&xas))) 2052 goto put_page; 2053 2054 pages[ret] = find_subpage(page, xas.xa_index); 2055 if (++ret == nr_pages) 2056 break; 2057 continue; 2058 put_page: 2059 put_page(page); 2060 retry: 2061 xas_reset(&xas); 2062 } 2063 rcu_read_unlock(); 2064 return ret; 2065 } 2066 EXPORT_SYMBOL(find_get_pages_contig); 2067 2068 /** 2069 * find_get_pages_range_tag - find and return pages in given range matching @tag 2070 * @mapping: the address_space to search 2071 * @index: the starting page index 2072 * @end: The final page index (inclusive) 2073 * @tag: the tag index 2074 * @nr_pages: the maximum number of pages 2075 * @pages: where the resulting pages are placed 2076 * 2077 * Like find_get_pages, except we only return pages which are tagged with 2078 * @tag. We update @index to index the next page for the traversal. 2079 * 2080 * Return: the number of pages which were found. 
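 *
 * Most callers reach this through pagevec_lookup_range_tag(); the
 * writeback code, for instance, scans a mapping with PAGECACHE_TAG_DIRTY
 * or PAGECACHE_TAG_TOWRITE to find the pages it must write back.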
2081 */ 2082 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, 2083 pgoff_t end, xa_mark_t tag, unsigned int nr_pages, 2084 struct page **pages) 2085 { 2086 XA_STATE(xas, &mapping->i_pages, *index); 2087 struct page *page; 2088 unsigned ret = 0; 2089 2090 if (unlikely(!nr_pages)) 2091 return 0; 2092 2093 rcu_read_lock(); 2094 xas_for_each_marked(&xas, page, end, tag) { 2095 if (xas_retry(&xas, page)) 2096 continue; 2097 /* 2098 * Shadow entries should never be tagged, but this iteration 2099 * is lockless so there is a window for page reclaim to evict 2100 * a page we saw tagged. Skip over it. 2101 */ 2102 if (xa_is_value(page)) 2103 continue; 2104 2105 if (!page_cache_get_speculative(page)) 2106 goto retry; 2107 2108 /* Has the page moved or been split? */ 2109 if (unlikely(page != xas_reload(&xas))) 2110 goto put_page; 2111 2112 pages[ret] = find_subpage(page, xas.xa_index); 2113 if (++ret == nr_pages) { 2114 *index = xas.xa_index + 1; 2115 goto out; 2116 } 2117 continue; 2118 put_page: 2119 put_page(page); 2120 retry: 2121 xas_reset(&xas); 2122 } 2123 2124 /* 2125 * We come here when we got to @end. We take care to not overflow the 2126 * index @index as it confuses some of the callers. This breaks the 2127 * iteration when there is a page at index -1 but that is already 2128 * broken anyway. 2129 */ 2130 if (end == (pgoff_t)-1) 2131 *index = (pgoff_t)-1; 2132 else 2133 *index = end + 1; 2134 out: 2135 rcu_read_unlock(); 2136 2137 return ret; 2138 } 2139 EXPORT_SYMBOL(find_get_pages_range_tag); 2140 2141 /* 2142 * CD/DVDs are error prone. When a medium error occurs, the driver may fail 2143 * a _large_ part of the i/o request. Imagine the worst scenario: 2144 * 2145 * ---R__________________________________________B__________ 2146 * ^ reading here ^ bad block(assume 4k) 2147 * 2148 * read(R) => miss => readahead(R...B) => media error => frustrating retries 2149 * => failing the whole request => read(R) => read(R+1) => 2150 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => 2151 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => 2152 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... 2153 * 2154 * It is going insane. Fix it by quickly scaling down the readahead size. 2155 */ 2156 static void shrink_readahead_size_eio(struct file_ra_state *ra) 2157 { 2158 ra->ra_pages /= 4; 2159 } 2160 2161 /** 2162 * generic_file_buffered_read - generic file read routine 2163 * @iocb: the iocb to read 2164 * @iter: data destination 2165 * @written: already copied 2166 * 2167 * This is a generic file read routine, and uses the 2168 * mapping->a_ops->readpage() function for the actual low-level stuff. 2169 * 2170 * This is really ugly. But the goto's actually try to clarify some 2171 * of the logic when it comes to error handling etc. 
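 * Most filesystems do not call this directly; they reach it through
 * generic_file_read_iter(), which falls back here after handling (or
 * skipping) direct I/O.  Note that ->readpage() is called with the page
 * locked and is expected to unlock it when the read completes.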
2172 * 2173 * Return: 2174 * * total number of bytes copied, including those the were already @written 2175 * * negative error code if nothing was copied 2176 */ 2177 ssize_t generic_file_buffered_read(struct kiocb *iocb, 2178 struct iov_iter *iter, ssize_t written) 2179 { 2180 struct file *filp = iocb->ki_filp; 2181 struct address_space *mapping = filp->f_mapping; 2182 struct inode *inode = mapping->host; 2183 struct file_ra_state *ra = &filp->f_ra; 2184 loff_t *ppos = &iocb->ki_pos; 2185 pgoff_t index; 2186 pgoff_t last_index; 2187 pgoff_t prev_index; 2188 unsigned long offset; /* offset into pagecache page */ 2189 unsigned int prev_offset; 2190 int error = 0; 2191 2192 if (unlikely(*ppos >= inode->i_sb->s_maxbytes)) 2193 return 0; 2194 iov_iter_truncate(iter, inode->i_sb->s_maxbytes); 2195 2196 index = *ppos >> PAGE_SHIFT; 2197 prev_index = ra->prev_pos >> PAGE_SHIFT; 2198 prev_offset = ra->prev_pos & (PAGE_SIZE-1); 2199 last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT; 2200 offset = *ppos & ~PAGE_MASK; 2201 2202 for (;;) { 2203 struct page *page; 2204 pgoff_t end_index; 2205 loff_t isize; 2206 unsigned long nr, ret; 2207 2208 cond_resched(); 2209 find_page: 2210 if (fatal_signal_pending(current)) { 2211 error = -EINTR; 2212 goto out; 2213 } 2214 2215 page = find_get_page(mapping, index); 2216 if (!page) { 2217 if (iocb->ki_flags & IOCB_NOIO) 2218 goto would_block; 2219 page_cache_sync_readahead(mapping, 2220 ra, filp, 2221 index, last_index - index); 2222 page = find_get_page(mapping, index); 2223 if (unlikely(page == NULL)) 2224 goto no_cached_page; 2225 } 2226 if (PageReadahead(page)) { 2227 if (iocb->ki_flags & IOCB_NOIO) { 2228 put_page(page); 2229 goto out; 2230 } 2231 page_cache_async_readahead(mapping, 2232 ra, filp, page, 2233 index, last_index - index); 2234 } 2235 if (!PageUptodate(page)) { 2236 /* 2237 * See comment in do_read_cache_page on why 2238 * wait_on_page_locked is used to avoid unnecessarily 2239 * serialisations and why it's safe. 2240 */ 2241 if (iocb->ki_flags & IOCB_WAITQ) { 2242 if (written) { 2243 put_page(page); 2244 goto out; 2245 } 2246 error = wait_on_page_locked_async(page, 2247 iocb->ki_waitq); 2248 } else { 2249 if (iocb->ki_flags & IOCB_NOWAIT) { 2250 put_page(page); 2251 goto would_block; 2252 } 2253 error = wait_on_page_locked_killable(page); 2254 } 2255 if (unlikely(error)) 2256 goto readpage_error; 2257 if (PageUptodate(page)) 2258 goto page_ok; 2259 2260 if (inode->i_blkbits == PAGE_SHIFT || 2261 !mapping->a_ops->is_partially_uptodate) 2262 goto page_not_up_to_date; 2263 /* pipes can't handle partially uptodate pages */ 2264 if (unlikely(iov_iter_is_pipe(iter))) 2265 goto page_not_up_to_date; 2266 if (!trylock_page(page)) 2267 goto page_not_up_to_date; 2268 /* Did it get truncated before we got the lock? */ 2269 if (!page->mapping) 2270 goto page_not_up_to_date_locked; 2271 if (!mapping->a_ops->is_partially_uptodate(page, 2272 offset, iter->count)) 2273 goto page_not_up_to_date_locked; 2274 unlock_page(page); 2275 } 2276 page_ok: 2277 /* 2278 * i_size must be checked after we know the page is Uptodate. 2279 * 2280 * Checking i_size after the check allows us to calculate 2281 * the correct value for "nr", which means the zero-filled 2282 * part of the page is not copied back to userspace (unless 2283 * another truncate extends the file - this is desired though). 
2284 */ 2285 2286 isize = i_size_read(inode); 2287 end_index = (isize - 1) >> PAGE_SHIFT; 2288 if (unlikely(!isize || index > end_index)) { 2289 put_page(page); 2290 goto out; 2291 } 2292 2293 /* nr is the maximum number of bytes to copy from this page */ 2294 nr = PAGE_SIZE; 2295 if (index == end_index) { 2296 nr = ((isize - 1) & ~PAGE_MASK) + 1; 2297 if (nr <= offset) { 2298 put_page(page); 2299 goto out; 2300 } 2301 } 2302 nr = nr - offset; 2303 2304 /* If users can be writing to this page using arbitrary 2305 * virtual addresses, take care about potential aliasing 2306 * before reading the page on the kernel side. 2307 */ 2308 if (mapping_writably_mapped(mapping)) 2309 flush_dcache_page(page); 2310 2311 /* 2312 * When a sequential read accesses a page several times, 2313 * only mark it as accessed the first time. 2314 */ 2315 if (prev_index != index || offset != prev_offset) 2316 mark_page_accessed(page); 2317 prev_index = index; 2318 2319 /* 2320 * Ok, we have the page, and it's up-to-date, so 2321 * now we can copy it to user space... 2322 */ 2323 2324 ret = copy_page_to_iter(page, offset, nr, iter); 2325 offset += ret; 2326 index += offset >> PAGE_SHIFT; 2327 offset &= ~PAGE_MASK; 2328 prev_offset = offset; 2329 2330 put_page(page); 2331 written += ret; 2332 if (!iov_iter_count(iter)) 2333 goto out; 2334 if (ret < nr) { 2335 error = -EFAULT; 2336 goto out; 2337 } 2338 continue; 2339 2340 page_not_up_to_date: 2341 /* Get exclusive access to the page ... */ 2342 if (iocb->ki_flags & IOCB_WAITQ) 2343 error = lock_page_async(page, iocb->ki_waitq); 2344 else 2345 error = lock_page_killable(page); 2346 if (unlikely(error)) 2347 goto readpage_error; 2348 2349 page_not_up_to_date_locked: 2350 /* Did it get truncated before we got the lock? */ 2351 if (!page->mapping) { 2352 unlock_page(page); 2353 put_page(page); 2354 continue; 2355 } 2356 2357 /* Did somebody else fill it already? */ 2358 if (PageUptodate(page)) { 2359 unlock_page(page); 2360 goto page_ok; 2361 } 2362 2363 readpage: 2364 if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT)) { 2365 unlock_page(page); 2366 put_page(page); 2367 goto would_block; 2368 } 2369 /* 2370 * A previous I/O error may have been due to temporary 2371 * failures, eg. multipath errors. 2372 * PG_error will be set again if readpage fails. 2373 */ 2374 ClearPageError(page); 2375 /* Start the actual read. The read will unlock the page. */ 2376 error = mapping->a_ops->readpage(filp, page); 2377 2378 if (unlikely(error)) { 2379 if (error == AOP_TRUNCATED_PAGE) { 2380 put_page(page); 2381 error = 0; 2382 goto find_page; 2383 } 2384 goto readpage_error; 2385 } 2386 2387 if (!PageUptodate(page)) { 2388 if (iocb->ki_flags & IOCB_WAITQ) 2389 error = lock_page_async(page, iocb->ki_waitq); 2390 else 2391 error = lock_page_killable(page); 2392 2393 if (unlikely(error)) 2394 goto readpage_error; 2395 if (!PageUptodate(page)) { 2396 if (page->mapping == NULL) { 2397 /* 2398 * invalidate_mapping_pages got it 2399 */ 2400 unlock_page(page); 2401 put_page(page); 2402 goto find_page; 2403 } 2404 unlock_page(page); 2405 shrink_readahead_size_eio(ra); 2406 error = -EIO; 2407 goto readpage_error; 2408 } 2409 unlock_page(page); 2410 } 2411 2412 goto page_ok; 2413 2414 readpage_error: 2415 /* UHHUH! A synchronous read error occurred. Report it */ 2416 put_page(page); 2417 goto out; 2418 2419 no_cached_page: 2420 /* 2421 * Ok, it wasn't cached, so we need to create a new 2422 * page.. 
2423 */ 2424 page = page_cache_alloc(mapping); 2425 if (!page) { 2426 error = -ENOMEM; 2427 goto out; 2428 } 2429 error = add_to_page_cache_lru(page, mapping, index, 2430 mapping_gfp_constraint(mapping, GFP_KERNEL)); 2431 if (error) { 2432 put_page(page); 2433 if (error == -EEXIST) { 2434 error = 0; 2435 goto find_page; 2436 } 2437 goto out; 2438 } 2439 goto readpage; 2440 } 2441 2442 would_block: 2443 error = -EAGAIN; 2444 out: 2445 ra->prev_pos = prev_index; 2446 ra->prev_pos <<= PAGE_SHIFT; 2447 ra->prev_pos |= prev_offset; 2448 2449 *ppos = ((loff_t)index << PAGE_SHIFT) + offset; 2450 file_accessed(filp); 2451 return written ? written : error; 2452 } 2453 EXPORT_SYMBOL_GPL(generic_file_buffered_read); 2454 2455 /** 2456 * generic_file_read_iter - generic filesystem read routine 2457 * @iocb: kernel I/O control block 2458 * @iter: destination for the data read 2459 * 2460 * This is the "read_iter()" routine for all filesystems 2461 * that can use the page cache directly. 2462 * 2463 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall 2464 * be returned when no data can be read without waiting for I/O requests 2465 * to complete; it doesn't prevent readahead. 2466 * 2467 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O 2468 * requests shall be made for the read or for readahead. When no data 2469 * can be read, -EAGAIN shall be returned. When readahead would be 2470 * triggered, a partial, possibly empty read shall be returned. 2471 * 2472 * Return: 2473 * * number of bytes copied, even for partial reads 2474 * * negative error code (or 0 if IOCB_NOIO) if nothing was read 2475 */ 2476 ssize_t 2477 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 2478 { 2479 size_t count = iov_iter_count(iter); 2480 ssize_t retval = 0; 2481 2482 if (!count) 2483 goto out; /* skip atime */ 2484 2485 if (iocb->ki_flags & IOCB_DIRECT) { 2486 struct file *file = iocb->ki_filp; 2487 struct address_space *mapping = file->f_mapping; 2488 struct inode *inode = mapping->host; 2489 loff_t size; 2490 2491 size = i_size_read(inode); 2492 if (iocb->ki_flags & IOCB_NOWAIT) { 2493 if (filemap_range_has_page(mapping, iocb->ki_pos, 2494 iocb->ki_pos + count - 1)) 2495 return -EAGAIN; 2496 } else { 2497 retval = filemap_write_and_wait_range(mapping, 2498 iocb->ki_pos, 2499 iocb->ki_pos + count - 1); 2500 if (retval < 0) 2501 goto out; 2502 } 2503 2504 file_accessed(file); 2505 2506 retval = mapping->a_ops->direct_IO(iocb, iter); 2507 if (retval >= 0) { 2508 iocb->ki_pos += retval; 2509 count -= retval; 2510 } 2511 iov_iter_revert(iter, count - iov_iter_count(iter)); 2512 2513 /* 2514 * Btrfs can have a short DIO read if we encounter 2515 * compressed extents, so if there was an error, or if 2516 * we've already read everything we wanted to, or if 2517 * there was a short read because we hit EOF, go ahead 2518 * and return. Otherwise fallthrough to buffered io for 2519 * the rest of the read. Buffered reads will not work for 2520 * DAX files, so don't bother trying. 2521 */ 2522 if (retval < 0 || !count || iocb->ki_pos >= size || 2523 IS_DAX(inode)) 2524 goto out; 2525 } 2526 2527 retval = generic_file_buffered_read(iocb, iter, retval); 2528 out: 2529 return retval; 2530 } 2531 EXPORT_SYMBOL(generic_file_read_iter); 2532 2533 #ifdef CONFIG_MMU 2534 #define MMAP_LOTSAMISS (100) 2535 /* 2536 * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock 2537 * @vmf - the vm_fault for this fault. 2538 * @page - the page to lock. 
2539 * @fpin - the pointer to the file we may pin (or is already pinned). 2540 * 2541 * This works similar to lock_page_or_retry in that it can drop the mmap_lock. 2542 * It differs in that it actually returns the page locked if it returns 1 and 0 2543 * if it couldn't lock the page. If we did have to drop the mmap_lock then fpin 2544 * will point to the pinned file and needs to be fput()'ed at a later point. 2545 */ 2546 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, 2547 struct file **fpin) 2548 { 2549 if (trylock_page(page)) 2550 return 1; 2551 2552 /* 2553 * NOTE! This will make us return with VM_FAULT_RETRY, but with 2554 * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT 2555 * is supposed to work. We have way too many special cases.. 2556 */ 2557 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) 2558 return 0; 2559 2560 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); 2561 if (vmf->flags & FAULT_FLAG_KILLABLE) { 2562 if (__lock_page_killable(page)) { 2563 /* 2564 * We didn't have the right flags to drop the mmap_lock, 2565 * but all fault_handlers only check for fatal signals 2566 * if we return VM_FAULT_RETRY, so we need to drop the 2567 * mmap_lock here and return 0 if we don't have a fpin. 2568 */ 2569 if (*fpin == NULL) 2570 mmap_read_unlock(vmf->vma->vm_mm); 2571 return 0; 2572 } 2573 } else 2574 __lock_page(page); 2575 return 1; 2576 } 2577 2578 2579 /* 2580 * Synchronous readahead happens when we don't even find a page in the page 2581 * cache at all. We don't want to perform IO under the mmap sem, so if we have 2582 * to drop the mmap sem we return the file that was pinned in order for us to do 2583 * that. If we didn't pin a file then we return NULL. The file that is 2584 * returned needs to be fput()'ed when we're done with it. 2585 */ 2586 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) 2587 { 2588 struct file *file = vmf->vma->vm_file; 2589 struct file_ra_state *ra = &file->f_ra; 2590 struct address_space *mapping = file->f_mapping; 2591 DEFINE_READAHEAD(ractl, file, mapping, vmf->pgoff); 2592 struct file *fpin = NULL; 2593 unsigned int mmap_miss; 2594 2595 /* If we don't want any read-ahead, don't bother */ 2596 if (vmf->vma->vm_flags & VM_RAND_READ) 2597 return fpin; 2598 if (!ra->ra_pages) 2599 return fpin; 2600 2601 if (vmf->vma->vm_flags & VM_SEQ_READ) { 2602 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 2603 page_cache_sync_ra(&ractl, ra, ra->ra_pages); 2604 return fpin; 2605 } 2606 2607 /* Avoid banging the cache line if not needed */ 2608 mmap_miss = READ_ONCE(ra->mmap_miss); 2609 if (mmap_miss < MMAP_LOTSAMISS * 10) 2610 WRITE_ONCE(ra->mmap_miss, ++mmap_miss); 2611 2612 /* 2613 * Do we miss much more than hit in this file? If so, 2614 * stop bothering with read-ahead. It will only hurt. 2615 */ 2616 if (mmap_miss > MMAP_LOTSAMISS) 2617 return fpin; 2618 2619 /* 2620 * mmap read-around 2621 */ 2622 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 2623 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); 2624 ra->size = ra->ra_pages; 2625 ra->async_size = ra->ra_pages / 4; 2626 ractl._index = ra->start; 2627 do_page_cache_ra(&ractl, ra->size, ra->async_size); 2628 return fpin; 2629 } 2630 2631 /* 2632 * Asynchronous readahead happens when we find the page and PG_readahead, 2633 * so we want to possibly extend the readahead further. We return the file that 2634 * was pinned if we have to drop the mmap_lock in order to do IO. 
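 * As with do_sync_mmap_readahead(), any file returned here must be
 * fput()'ed once the fault is done; filemap_fault() does that on its
 * out_retry path.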
2635 */ 2636 static struct file *do_async_mmap_readahead(struct vm_fault *vmf, 2637 struct page *page) 2638 { 2639 struct file *file = vmf->vma->vm_file; 2640 struct file_ra_state *ra = &file->f_ra; 2641 struct address_space *mapping = file->f_mapping; 2642 struct file *fpin = NULL; 2643 unsigned int mmap_miss; 2644 pgoff_t offset = vmf->pgoff; 2645 2646 /* If we don't want any read-ahead, don't bother */ 2647 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) 2648 return fpin; 2649 mmap_miss = READ_ONCE(ra->mmap_miss); 2650 if (mmap_miss) 2651 WRITE_ONCE(ra->mmap_miss, --mmap_miss); 2652 if (PageReadahead(page)) { 2653 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 2654 page_cache_async_readahead(mapping, ra, file, 2655 page, offset, ra->ra_pages); 2656 } 2657 return fpin; 2658 } 2659 2660 /** 2661 * filemap_fault - read in file data for page fault handling 2662 * @vmf: struct vm_fault containing details of the fault 2663 * 2664 * filemap_fault() is invoked via the vma operations vector for a 2665 * mapped memory region to read in file data during a page fault. 2666 * 2667 * The goto's are kind of ugly, but this streamlines the normal case of having 2668 * it in the page cache, and handles the special cases reasonably without 2669 * having a lot of duplicated code. 2670 * 2671 * vma->vm_mm->mmap_lock must be held on entry. 2672 * 2673 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock 2674 * may be dropped before doing I/O or by lock_page_maybe_drop_mmap(). 2675 * 2676 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock 2677 * has not been released. 2678 * 2679 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. 2680 * 2681 * Return: bitwise-OR of %VM_FAULT_ codes. 2682 */ 2683 vm_fault_t filemap_fault(struct vm_fault *vmf) 2684 { 2685 int error; 2686 struct file *file = vmf->vma->vm_file; 2687 struct file *fpin = NULL; 2688 struct address_space *mapping = file->f_mapping; 2689 struct file_ra_state *ra = &file->f_ra; 2690 struct inode *inode = mapping->host; 2691 pgoff_t offset = vmf->pgoff; 2692 pgoff_t max_off; 2693 struct page *page; 2694 vm_fault_t ret = 0; 2695 2696 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2697 if (unlikely(offset >= max_off)) 2698 return VM_FAULT_SIGBUS; 2699 2700 /* 2701 * Do we have something in the page cache already? 2702 */ 2703 page = find_get_page(mapping, offset); 2704 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { 2705 /* 2706 * We found the page, so try async readahead before 2707 * waiting for the lock. 2708 */ 2709 fpin = do_async_mmap_readahead(vmf, page); 2710 } else if (!page) { 2711 /* No page in the page cache at all */ 2712 count_vm_event(PGMAJFAULT); 2713 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); 2714 ret = VM_FAULT_MAJOR; 2715 fpin = do_sync_mmap_readahead(vmf); 2716 retry_find: 2717 page = pagecache_get_page(mapping, offset, 2718 FGP_CREAT|FGP_FOR_MMAP, 2719 vmf->gfp_mask); 2720 if (!page) { 2721 if (fpin) 2722 goto out_retry; 2723 return VM_FAULT_OOM; 2724 } 2725 } 2726 2727 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) 2728 goto out_retry; 2729 2730 /* Did it get truncated? */ 2731 if (unlikely(compound_head(page)->mapping != mapping)) { 2732 unlock_page(page); 2733 put_page(page); 2734 goto retry_find; 2735 } 2736 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); 2737 2738 /* 2739 * We have a locked page in the page cache, now we need to check 2740 * that it's up-to-date. If not, it is going to be due to an error. 
2741 */ 2742 if (unlikely(!PageUptodate(page))) 2743 goto page_not_uptodate; 2744 2745 /* 2746 * We've made it this far and we had to drop our mmap_lock, now is the 2747 * time to return to the upper layer and have it re-find the vma and 2748 * redo the fault. 2749 */ 2750 if (fpin) { 2751 unlock_page(page); 2752 goto out_retry; 2753 } 2754 2755 /* 2756 * Found the page and have a reference on it. 2757 * We must recheck i_size under page lock. 2758 */ 2759 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2760 if (unlikely(offset >= max_off)) { 2761 unlock_page(page); 2762 put_page(page); 2763 return VM_FAULT_SIGBUS; 2764 } 2765 2766 vmf->page = page; 2767 return ret | VM_FAULT_LOCKED; 2768 2769 page_not_uptodate: 2770 /* 2771 * Umm, take care of errors if the page isn't up-to-date. 2772 * Try to re-read it _once_. We do this synchronously, 2773 * because there really aren't any performance issues here 2774 * and we need to check for errors. 2775 */ 2776 ClearPageError(page); 2777 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 2778 error = mapping->a_ops->readpage(file, page); 2779 if (!error) { 2780 wait_on_page_locked(page); 2781 if (!PageUptodate(page)) 2782 error = -EIO; 2783 } 2784 if (fpin) 2785 goto out_retry; 2786 put_page(page); 2787 2788 if (!error || error == AOP_TRUNCATED_PAGE) 2789 goto retry_find; 2790 2791 shrink_readahead_size_eio(ra); 2792 return VM_FAULT_SIGBUS; 2793 2794 out_retry: 2795 /* 2796 * We dropped the mmap_lock, we need to return to the fault handler to 2797 * re-find the vma and come back and find our hopefully still populated 2798 * page. 2799 */ 2800 if (page) 2801 put_page(page); 2802 if (fpin) 2803 fput(fpin); 2804 return ret | VM_FAULT_RETRY; 2805 } 2806 EXPORT_SYMBOL(filemap_fault); 2807 2808 void filemap_map_pages(struct vm_fault *vmf, 2809 pgoff_t start_pgoff, pgoff_t end_pgoff) 2810 { 2811 struct file *file = vmf->vma->vm_file; 2812 struct address_space *mapping = file->f_mapping; 2813 pgoff_t last_pgoff = start_pgoff; 2814 unsigned long max_idx; 2815 XA_STATE(xas, &mapping->i_pages, start_pgoff); 2816 struct page *head, *page; 2817 unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss); 2818 2819 rcu_read_lock(); 2820 xas_for_each(&xas, head, end_pgoff) { 2821 if (xas_retry(&xas, head)) 2822 continue; 2823 if (xa_is_value(head)) 2824 goto next; 2825 2826 /* 2827 * Check for a locked page first, as a speculative 2828 * reference may adversely influence page migration. 2829 */ 2830 if (PageLocked(head)) 2831 goto next; 2832 if (!page_cache_get_speculative(head)) 2833 goto next; 2834 2835 /* Has the page moved or been split? */ 2836 if (unlikely(head != xas_reload(&xas))) 2837 goto skip; 2838 page = find_subpage(head, xas.xa_index); 2839 2840 if (!PageUptodate(head) || 2841 PageReadahead(page) || 2842 PageHWPoison(page)) 2843 goto skip; 2844 if (!trylock_page(head)) 2845 goto skip; 2846 2847 if (head->mapping != mapping || !PageUptodate(head)) 2848 goto unlock; 2849 2850 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 2851 if (xas.xa_index >= max_idx) 2852 goto unlock; 2853 2854 if (mmap_miss > 0) 2855 mmap_miss--; 2856 2857 vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT; 2858 if (vmf->pte) 2859 vmf->pte += xas.xa_index - last_pgoff; 2860 last_pgoff = xas.xa_index; 2861 if (alloc_set_pte(vmf, page)) 2862 goto unlock; 2863 unlock_page(head); 2864 goto next; 2865 unlock: 2866 unlock_page(head); 2867 skip: 2868 put_page(head); 2869 next: 2870 /* Huge page is mapped? No need to proceed. 
*/ 2871 if (pmd_trans_huge(*vmf->pmd)) 2872 break; 2873 } 2874 rcu_read_unlock(); 2875 WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss); 2876 } 2877 EXPORT_SYMBOL(filemap_map_pages); 2878 2879 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) 2880 { 2881 struct page *page = vmf->page; 2882 struct inode *inode = file_inode(vmf->vma->vm_file); 2883 vm_fault_t ret = VM_FAULT_LOCKED; 2884 2885 sb_start_pagefault(inode->i_sb); 2886 file_update_time(vmf->vma->vm_file); 2887 lock_page(page); 2888 if (page->mapping != inode->i_mapping) { 2889 unlock_page(page); 2890 ret = VM_FAULT_NOPAGE; 2891 goto out; 2892 } 2893 /* 2894 * We mark the page dirty already here so that when freeze is in 2895 * progress, we are guaranteed that writeback during freezing will 2896 * see the dirty page and writeprotect it again. 2897 */ 2898 set_page_dirty(page); 2899 wait_for_stable_page(page); 2900 out: 2901 sb_end_pagefault(inode->i_sb); 2902 return ret; 2903 } 2904 2905 const struct vm_operations_struct generic_file_vm_ops = { 2906 .fault = filemap_fault, 2907 .map_pages = filemap_map_pages, 2908 .page_mkwrite = filemap_page_mkwrite, 2909 }; 2910 2911 /* This is used for a general mmap of a disk file */ 2912 2913 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2914 { 2915 struct address_space *mapping = file->f_mapping; 2916 2917 if (!mapping->a_ops->readpage) 2918 return -ENOEXEC; 2919 file_accessed(file); 2920 vma->vm_ops = &generic_file_vm_ops; 2921 return 0; 2922 } 2923 2924 /* 2925 * This is for filesystems which do not implement ->writepage. 2926 */ 2927 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 2928 { 2929 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 2930 return -EINVAL; 2931 return generic_file_mmap(file, vma); 2932 } 2933 #else 2934 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) 2935 { 2936 return VM_FAULT_SIGBUS; 2937 } 2938 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2939 { 2940 return -ENOSYS; 2941 } 2942 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) 2943 { 2944 return -ENOSYS; 2945 } 2946 #endif /* CONFIG_MMU */ 2947 2948 EXPORT_SYMBOL(filemap_page_mkwrite); 2949 EXPORT_SYMBOL(generic_file_mmap); 2950 EXPORT_SYMBOL(generic_file_readonly_mmap); 2951 2952 static struct page *wait_on_page_read(struct page *page) 2953 { 2954 if (!IS_ERR(page)) { 2955 wait_on_page_locked(page); 2956 if (!PageUptodate(page)) { 2957 put_page(page); 2958 page = ERR_PTR(-EIO); 2959 } 2960 } 2961 return page; 2962 } 2963 2964 static struct page *do_read_cache_page(struct address_space *mapping, 2965 pgoff_t index, 2966 int (*filler)(void *, struct page *), 2967 void *data, 2968 gfp_t gfp) 2969 { 2970 struct page *page; 2971 int err; 2972 repeat: 2973 page = find_get_page(mapping, index); 2974 if (!page) { 2975 page = __page_cache_alloc(gfp); 2976 if (!page) 2977 return ERR_PTR(-ENOMEM); 2978 err = add_to_page_cache_lru(page, mapping, index, gfp); 2979 if (unlikely(err)) { 2980 put_page(page); 2981 if (err == -EEXIST) 2982 goto repeat; 2983 /* Presumably ENOMEM for xarray node */ 2984 return ERR_PTR(err); 2985 } 2986 2987 filler: 2988 if (filler) 2989 err = filler(data, page); 2990 else 2991 err = mapping->a_ops->readpage(data, page); 2992 2993 if (err < 0) { 2994 put_page(page); 2995 return ERR_PTR(err); 2996 } 2997 2998 page = wait_on_page_read(page); 2999 if (IS_ERR(page)) 3000 return page; 3001 goto out; 3002 } 3003 if (PageUptodate(page)) 3004 goto out; 3005 3006 /* 3007 * Page is 
not up to date and may be locked due to one of the following 3008 * case a: Page is being filled and the page lock is held 3009 * case b: Read/write error clearing the page uptodate status 3010 * case c: Truncation in progress (page locked) 3011 * case d: Reclaim in progress 3012 * 3013 * Case a, the page will be up to date when the page is unlocked. 3014 * There is no need to serialise on the page lock here as the page 3015 * is pinned so the lock gives no additional protection. Even if the 3016 * page is truncated, the data is still valid if PageUptodate as 3017 * it's a read vs truncate race. 3018 * Case b, the page will not be up to date. 3019 * Case c, the page may be truncated but in itself, the data may still 3020 * be valid after IO completes as it's a read vs truncate race. The 3021 * operation must restart if the page is not uptodate on unlock but 3022 * otherwise serialising on page lock to stabilise the mapping gives 3023 * no additional guarantees to the caller as the page lock is 3024 * released before return. 3025 * Case d, similar to truncation. If reclaim holds the page lock, it 3026 * will be a race with remove_mapping that determines if the mapping 3027 * is valid on unlock but otherwise the data is valid and there is 3028 * no need to serialise with page lock. 3029 * 3030 * As the page lock gives no additional guarantee, we optimistically 3031 * wait on the page to be unlocked and check if it's up to date and 3032 * use the page if it is. Otherwise, the page lock is required to 3033 * distinguish between the different cases. The motivation is that we 3034 * avoid spurious serialisations and wakeups when multiple processes 3035 * wait on the same page for IO to complete. 3036 */ 3037 wait_on_page_locked(page); 3038 if (PageUptodate(page)) 3039 goto out; 3040 3041 /* Distinguish between all the cases under the safety of the lock */ 3042 lock_page(page); 3043 3044 /* Case c or d, restart the operation */ 3045 if (!page->mapping) { 3046 unlock_page(page); 3047 put_page(page); 3048 goto repeat; 3049 } 3050 3051 /* Someone else locked and filled the page in a very small window */ 3052 if (PageUptodate(page)) { 3053 unlock_page(page); 3054 goto out; 3055 } 3056 3057 /* 3058 * A previous I/O error may have been due to temporary 3059 * failures. 3060 * Clear page error before actual read, PG_error will be 3061 * set again if readpage fails. 3062 */ 3063 ClearPageError(page); 3064 goto filler; 3065 3066 out: 3067 mark_page_accessed(page); 3068 return page; 3069 } 3070 3071 /** 3072 * read_cache_page - read into page cache, fill it if needed 3073 * @mapping: the page's address_space 3074 * @index: the page index 3075 * @filler: function to perform the read 3076 * @data: first arg to filler(data, page) function, often left as NULL 3077 * 3078 * Read into the page cache. If a page already exists, and PageUptodate() is 3079 * not set, try to fill the page and wait for it to become unlocked. 3080 * 3081 * If the page does not get brought uptodate, return -EIO. 3082 * 3083 * Return: up to date page on success, ERR_PTR() on failure. 3084 */ 3085 struct page *read_cache_page(struct address_space *mapping, 3086 pgoff_t index, 3087 int (*filler)(void *, struct page *), 3088 void *data) 3089 { 3090 return do_read_cache_page(mapping, index, filler, data, 3091 mapping_gfp_mask(mapping)); 3092 } 3093 EXPORT_SYMBOL(read_cache_page); 3094 3095 /** 3096 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3097 * @mapping: the page's address_space 3098 * @index: the page index 3099 * @gfp: the page allocator flags to use if allocating 3100 * 3101 * This is the same as "read_mapping_page(mapping, index, NULL)", but with 3102 * any new page allocations done using the specified allocation flags. 3103 * 3104 * If the page does not get brought uptodate, return -EIO. 3105 * 3106 * Return: up to date page on success, ERR_PTR() on failure. 3107 */ 3108 struct page *read_cache_page_gfp(struct address_space *mapping, 3109 pgoff_t index, 3110 gfp_t gfp) 3111 { 3112 return do_read_cache_page(mapping, index, NULL, NULL, gfp); 3113 } 3114 EXPORT_SYMBOL(read_cache_page_gfp); 3115 3116 /* 3117 * Don't operate on ranges the page cache doesn't support, and don't exceed the 3118 * LFS limits. If pos is under the limit it becomes a short access. If it 3119 * exceeds the limit we return -EFBIG. 3120 */ 3121 static int generic_write_check_limits(struct file *file, loff_t pos, 3122 loff_t *count) 3123 { 3124 struct inode *inode = file->f_mapping->host; 3125 loff_t max_size = inode->i_sb->s_maxbytes; 3126 loff_t limit = rlimit(RLIMIT_FSIZE); 3127 3128 if (limit != RLIM_INFINITY) { 3129 if (pos >= limit) { 3130 send_sig(SIGXFSZ, current, 0); 3131 return -EFBIG; 3132 } 3133 *count = min(*count, limit - pos); 3134 } 3135 3136 if (!(file->f_flags & O_LARGEFILE)) 3137 max_size = MAX_NON_LFS; 3138 3139 if (unlikely(pos >= max_size)) 3140 return -EFBIG; 3141 3142 *count = min(*count, max_size - pos); 3143 3144 return 0; 3145 } 3146 3147 /* 3148 * Performs necessary checks before doing a write 3149 * 3150 * Can adjust writing position or amount of bytes to write. 3151 * Returns appropriate error code that caller should return or 3152 * zero in case that write should be allowed. 3153 */ 3154 inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) 3155 { 3156 struct file *file = iocb->ki_filp; 3157 struct inode *inode = file->f_mapping->host; 3158 loff_t count; 3159 int ret; 3160 3161 if (IS_SWAPFILE(inode)) 3162 return -ETXTBSY; 3163 3164 if (!iov_iter_count(from)) 3165 return 0; 3166 3167 /* FIXME: this is for backwards compatibility with 2.4 */ 3168 if (iocb->ki_flags & IOCB_APPEND) 3169 iocb->ki_pos = i_size_read(inode); 3170 3171 if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) 3172 return -EINVAL; 3173 3174 count = iov_iter_count(from); 3175 ret = generic_write_check_limits(file, iocb->ki_pos, &count); 3176 if (ret) 3177 return ret; 3178 3179 iov_iter_truncate(from, count); 3180 return iov_iter_count(from); 3181 } 3182 EXPORT_SYMBOL(generic_write_checks); 3183 3184 /* 3185 * Performs necessary checks before doing a clone. 3186 * 3187 * Can adjust amount of bytes to clone via @req_count argument. 3188 * Returns appropriate error code that caller should return or 3189 * zero in case the clone should be allowed. 3190 */ 3191 int generic_remap_checks(struct file *file_in, loff_t pos_in, 3192 struct file *file_out, loff_t pos_out, 3193 loff_t *req_count, unsigned int remap_flags) 3194 { 3195 struct inode *inode_in = file_in->f_mapping->host; 3196 struct inode *inode_out = file_out->f_mapping->host; 3197 uint64_t count = *req_count; 3198 uint64_t bcount; 3199 loff_t size_in, size_out; 3200 loff_t bs = inode_out->i_sb->s_blocksize; 3201 int ret; 3202 3203 /* The start of both ranges must be aligned to an fs block. */ 3204 if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs)) 3205 return -EINVAL; 3206 3207 /* Ensure offsets don't wrap. 
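 * A huge @count whose sum with either offset wraps around would
 * otherwise slip past the EOF and size-limit checks below.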
*/ 3208 if (pos_in + count < pos_in || pos_out + count < pos_out) 3209 return -EINVAL; 3210 3211 size_in = i_size_read(inode_in); 3212 size_out = i_size_read(inode_out); 3213 3214 /* Dedupe requires both ranges to be within EOF. */ 3215 if ((remap_flags & REMAP_FILE_DEDUP) && 3216 (pos_in >= size_in || pos_in + count > size_in || 3217 pos_out >= size_out || pos_out + count > size_out)) 3218 return -EINVAL; 3219 3220 /* Ensure the infile range is within the infile. */ 3221 if (pos_in >= size_in) 3222 return -EINVAL; 3223 count = min(count, size_in - (uint64_t)pos_in); 3224 3225 ret = generic_write_check_limits(file_out, pos_out, &count); 3226 if (ret) 3227 return ret; 3228 3229 /* 3230 * If the user wanted us to link to the infile's EOF, round up to the 3231 * next block boundary for this check. 3232 * 3233 * Otherwise, make sure the count is also block-aligned, having 3234 * already confirmed the starting offsets' block alignment. 3235 */ 3236 if (pos_in + count == size_in) { 3237 bcount = ALIGN(size_in, bs) - pos_in; 3238 } else { 3239 if (!IS_ALIGNED(count, bs)) 3240 count = ALIGN_DOWN(count, bs); 3241 bcount = count; 3242 } 3243 3244 /* Don't allow overlapped cloning within the same file. */ 3245 if (inode_in == inode_out && 3246 pos_out + bcount > pos_in && 3247 pos_out < pos_in + bcount) 3248 return -EINVAL; 3249 3250 /* 3251 * We shortened the request but the caller can't deal with that, so 3252 * bounce the request back to userspace. 3253 */ 3254 if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN)) 3255 return -EINVAL; 3256 3257 *req_count = count; 3258 return 0; 3259 } 3260 3261 3262 /* 3263 * Performs common checks before doing a file copy/clone 3264 * from @file_in to @file_out. 3265 */ 3266 int generic_file_rw_checks(struct file *file_in, struct file *file_out) 3267 { 3268 struct inode *inode_in = file_inode(file_in); 3269 struct inode *inode_out = file_inode(file_out); 3270 3271 /* Don't copy dirs, pipes, sockets... */ 3272 if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) 3273 return -EISDIR; 3274 if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode)) 3275 return -EINVAL; 3276 3277 if (!(file_in->f_mode & FMODE_READ) || 3278 !(file_out->f_mode & FMODE_WRITE) || 3279 (file_out->f_flags & O_APPEND)) 3280 return -EBADF; 3281 3282 return 0; 3283 } 3284 3285 /* 3286 * Performs necessary checks before doing a file copy 3287 * 3288 * Can adjust amount of bytes to copy via @req_count argument. 3289 * Returns appropriate error code that caller should return or 3290 * zero in case the copy should be allowed. 3291 */ 3292 int generic_copy_file_checks(struct file *file_in, loff_t pos_in, 3293 struct file *file_out, loff_t pos_out, 3294 size_t *req_count, unsigned int flags) 3295 { 3296 struct inode *inode_in = file_inode(file_in); 3297 struct inode *inode_out = file_inode(file_out); 3298 uint64_t count = *req_count; 3299 loff_t size_in; 3300 int ret; 3301 3302 ret = generic_file_rw_checks(file_in, file_out); 3303 if (ret) 3304 return ret; 3305 3306 /* Don't touch certain kinds of inodes */ 3307 if (IS_IMMUTABLE(inode_out)) 3308 return -EPERM; 3309 3310 if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out)) 3311 return -ETXTBSY; 3312 3313 /* Ensure offsets don't wrap. 
*/ 3314 if (pos_in + count < pos_in || pos_out + count < pos_out) 3315 return -EOVERFLOW; 3316 3317 /* Shorten the copy to EOF */ 3318 size_in = i_size_read(inode_in); 3319 if (pos_in >= size_in) 3320 count = 0; 3321 else 3322 count = min(count, size_in - (uint64_t)pos_in); 3323 3324 ret = generic_write_check_limits(file_out, pos_out, &count); 3325 if (ret) 3326 return ret; 3327 3328 /* Don't allow overlapped copying within the same file. */ 3329 if (inode_in == inode_out && 3330 pos_out + count > pos_in && 3331 pos_out < pos_in + count) 3332 return -EINVAL; 3333 3334 *req_count = count; 3335 return 0; 3336 } 3337 3338 int pagecache_write_begin(struct file *file, struct address_space *mapping, 3339 loff_t pos, unsigned len, unsigned flags, 3340 struct page **pagep, void **fsdata) 3341 { 3342 const struct address_space_operations *aops = mapping->a_ops; 3343 3344 return aops->write_begin(file, mapping, pos, len, flags, 3345 pagep, fsdata); 3346 } 3347 EXPORT_SYMBOL(pagecache_write_begin); 3348 3349 int pagecache_write_end(struct file *file, struct address_space *mapping, 3350 loff_t pos, unsigned len, unsigned copied, 3351 struct page *page, void *fsdata) 3352 { 3353 const struct address_space_operations *aops = mapping->a_ops; 3354 3355 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); 3356 } 3357 EXPORT_SYMBOL(pagecache_write_end); 3358 3359 /* 3360 * Warn about a page cache invalidation failure during a direct I/O write. 3361 */ 3362 void dio_warn_stale_pagecache(struct file *filp) 3363 { 3364 static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST); 3365 char pathname[128]; 3366 struct inode *inode = file_inode(filp); 3367 char *path; 3368 3369 errseq_set(&inode->i_mapping->wb_err, -EIO); 3370 if (__ratelimit(&_rs)) { 3371 path = file_path(filp, pathname, sizeof(pathname)); 3372 if (IS_ERR(path)) 3373 path = "(unknown)"; 3374 pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n"); 3375 pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid, 3376 current->comm); 3377 } 3378 } 3379 3380 ssize_t 3381 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) 3382 { 3383 struct file *file = iocb->ki_filp; 3384 struct address_space *mapping = file->f_mapping; 3385 struct inode *inode = mapping->host; 3386 loff_t pos = iocb->ki_pos; 3387 ssize_t written; 3388 size_t write_len; 3389 pgoff_t end; 3390 3391 write_len = iov_iter_count(from); 3392 end = (pos + write_len - 1) >> PAGE_SHIFT; 3393 3394 if (iocb->ki_flags & IOCB_NOWAIT) { 3395 /* If there are pages to writeback, return */ 3396 if (filemap_range_has_page(inode->i_mapping, pos, 3397 pos + write_len - 1)) 3398 return -EAGAIN; 3399 } else { 3400 written = filemap_write_and_wait_range(mapping, pos, 3401 pos + write_len - 1); 3402 if (written) 3403 goto out; 3404 } 3405 3406 /* 3407 * After a write we want buffered reads to be sure to go to disk to get 3408 * the new data. We invalidate clean cached page from the region we're 3409 * about to write. We do this *before* the write so that we can return 3410 * without clobbering -EIOCBQUEUED from ->direct_IO(). 3411 */ 3412 written = invalidate_inode_pages2_range(mapping, 3413 pos >> PAGE_SHIFT, end); 3414 /* 3415 * If a page can not be invalidated, return 0 to fall back 3416 * to buffered write. 
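 * invalidate_inode_pages2_range() reports that case as -EBUSY; any
 * other error is handed straight back to the caller.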
3417 */ 3418 if (written) { 3419 if (written == -EBUSY) 3420 return 0; 3421 goto out; 3422 } 3423 3424 written = mapping->a_ops->direct_IO(iocb, from); 3425 3426 /* 3427 * Finally, try again to invalidate clean pages which might have been 3428 * cached by non-direct readahead, or faulted in by get_user_pages() 3429 * if the source of the write was an mmap'ed region of the file 3430 * we're writing. Either one is a pretty crazy thing to do, 3431 * so we don't support it 100%. If this invalidation 3432 * fails, tough, the write still worked... 3433 * 3434 * Most of the time we do not need this since dio_complete() will do 3435 * the invalidation for us. However there are some file systems that 3436 * do not end up with dio_complete() being called, so let's not break 3437 * them by removing it completely. 3438 * 3439 * Noticeable example is a blkdev_direct_IO(). 3440 * 3441 * Skip invalidation for async writes or if mapping has no pages. 3442 */ 3443 if (written > 0 && mapping->nrpages && 3444 invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end)) 3445 dio_warn_stale_pagecache(file); 3446 3447 if (written > 0) { 3448 pos += written; 3449 write_len -= written; 3450 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { 3451 i_size_write(inode, pos); 3452 mark_inode_dirty(inode); 3453 } 3454 iocb->ki_pos = pos; 3455 } 3456 iov_iter_revert(from, write_len - iov_iter_count(from)); 3457 out: 3458 return written; 3459 } 3460 EXPORT_SYMBOL(generic_file_direct_write); 3461 3462 /* 3463 * Find or create a page at the given pagecache position. Return the locked 3464 * page. This function is specifically for buffered writes. 3465 */ 3466 struct page *grab_cache_page_write_begin(struct address_space *mapping, 3467 pgoff_t index, unsigned flags) 3468 { 3469 struct page *page; 3470 int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT; 3471 3472 if (flags & AOP_FLAG_NOFS) 3473 fgp_flags |= FGP_NOFS; 3474 3475 page = pagecache_get_page(mapping, index, fgp_flags, 3476 mapping_gfp_mask(mapping)); 3477 if (page) 3478 wait_for_stable_page(page); 3479 3480 return page; 3481 } 3482 EXPORT_SYMBOL(grab_cache_page_write_begin); 3483 3484 ssize_t generic_perform_write(struct file *file, 3485 struct iov_iter *i, loff_t pos) 3486 { 3487 struct address_space *mapping = file->f_mapping; 3488 const struct address_space_operations *a_ops = mapping->a_ops; 3489 long status = 0; 3490 ssize_t written = 0; 3491 unsigned int flags = 0; 3492 3493 do { 3494 struct page *page; 3495 unsigned long offset; /* Offset into pagecache page */ 3496 unsigned long bytes; /* Bytes to write to page */ 3497 size_t copied; /* Bytes copied from user */ 3498 void *fsdata; 3499 3500 offset = (pos & (PAGE_SIZE - 1)); 3501 bytes = min_t(unsigned long, PAGE_SIZE - offset, 3502 iov_iter_count(i)); 3503 3504 again: 3505 /* 3506 * Bring in the user page that we will copy from _first_. 3507 * Otherwise there's a nasty deadlock on copying from the 3508 * same page as we're writing to, without it being marked 3509 * up-to-date. 3510 * 3511 * Not only is this an optimisation, but it is also required 3512 * to check that the address is actually valid, when atomic 3513 * usercopies are used, below. 
3514 */ 3515 if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 3516 status = -EFAULT; 3517 break; 3518 } 3519 3520 if (fatal_signal_pending(current)) { 3521 status = -EINTR; 3522 break; 3523 } 3524 3525 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 3526 &page, &fsdata); 3527 if (unlikely(status < 0)) 3528 break; 3529 3530 if (mapping_writably_mapped(mapping)) 3531 flush_dcache_page(page); 3532 3533 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 3534 flush_dcache_page(page); 3535 3536 status = a_ops->write_end(file, mapping, pos, bytes, copied, 3537 page, fsdata); 3538 if (unlikely(status < 0)) 3539 break; 3540 copied = status; 3541 3542 cond_resched(); 3543 3544 iov_iter_advance(i, copied); 3545 if (unlikely(copied == 0)) { 3546 /* 3547 * If we were unable to copy any data at all, we must 3548 * fall back to a single segment length write. 3549 * 3550 * If we didn't fallback here, we could livelock 3551 * because not all segments in the iov can be copied at 3552 * once without a pagefault. 3553 */ 3554 bytes = min_t(unsigned long, PAGE_SIZE - offset, 3555 iov_iter_single_seg_count(i)); 3556 goto again; 3557 } 3558 pos += copied; 3559 written += copied; 3560 3561 balance_dirty_pages_ratelimited(mapping); 3562 } while (iov_iter_count(i)); 3563 3564 return written ? written : status; 3565 } 3566 EXPORT_SYMBOL(generic_perform_write); 3567 3568 /** 3569 * __generic_file_write_iter - write data to a file 3570 * @iocb: IO state structure (file, offset, etc.) 3571 * @from: iov_iter with data to write 3572 * 3573 * This function does all the work needed for actually writing data to a 3574 * file. It does all basic checks, removes SUID from the file, updates 3575 * modification times and calls proper subroutines depending on whether we 3576 * do direct IO or a standard buffered write. 3577 * 3578 * It expects i_mutex to be grabbed unless we work on a block device or similar 3579 * object which does not need locking at all. 3580 * 3581 * This function does *not* take care of syncing data in case of O_SYNC write. 3582 * A caller has to handle it. This is mainly due to the fact that we want to 3583 * avoid syncing under i_mutex. 3584 * 3585 * Return: 3586 * * number of bytes written, even for truncated writes 3587 * * negative error code if no data has been written at all 3588 */ 3589 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 3590 { 3591 struct file *file = iocb->ki_filp; 3592 struct address_space * mapping = file->f_mapping; 3593 struct inode *inode = mapping->host; 3594 ssize_t written = 0; 3595 ssize_t err; 3596 ssize_t status; 3597 3598 /* We can write back this queue in page reclaim */ 3599 current->backing_dev_info = inode_to_bdi(inode); 3600 err = file_remove_privs(file); 3601 if (err) 3602 goto out; 3603 3604 err = file_update_time(file); 3605 if (err) 3606 goto out; 3607 3608 if (iocb->ki_flags & IOCB_DIRECT) { 3609 loff_t pos, endbyte; 3610 3611 written = generic_file_direct_write(iocb, from); 3612 /* 3613 * If the write stopped short of completing, fall back to 3614 * buffered writes. Some filesystems do this for writes to 3615 * holes, for example. For DAX files, a buffered write will 3616 * not succeed (even if it did, DAX does not handle dirty 3617 * page-cache pages correctly). 
3618 */ 3619 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) 3620 goto out; 3621 3622 status = generic_perform_write(file, from, pos = iocb->ki_pos); 3623 /* 3624 * If generic_perform_write() returned a synchronous error 3625 * then we want to return the number of bytes which were 3626 * direct-written, or the error code if that was zero. Note 3627 * that this differs from normal direct-io semantics, which 3628 * will return -EFOO even if some bytes were written. 3629 */ 3630 if (unlikely(status < 0)) { 3631 err = status; 3632 goto out; 3633 } 3634 /* 3635 * We need to ensure that the page cache pages are written to 3636 * disk and invalidated to preserve the expected O_DIRECT 3637 * semantics. 3638 */ 3639 endbyte = pos + status - 1; 3640 err = filemap_write_and_wait_range(mapping, pos, endbyte); 3641 if (err == 0) { 3642 iocb->ki_pos = endbyte + 1; 3643 written += status; 3644 invalidate_mapping_pages(mapping, 3645 pos >> PAGE_SHIFT, 3646 endbyte >> PAGE_SHIFT); 3647 } else { 3648 /* 3649 * We don't know how much we wrote, so just return 3650 * the number of bytes which were direct-written 3651 */ 3652 } 3653 } else { 3654 written = generic_perform_write(file, from, iocb->ki_pos); 3655 if (likely(written > 0)) 3656 iocb->ki_pos += written; 3657 } 3658 out: 3659 current->backing_dev_info = NULL; 3660 return written ? written : err; 3661 } 3662 EXPORT_SYMBOL(__generic_file_write_iter); 3663 3664 /** 3665 * generic_file_write_iter - write data to a file 3666 * @iocb: IO state structure 3667 * @from: iov_iter with data to write 3668 * 3669 * This is a wrapper around __generic_file_write_iter() to be used by most 3670 * filesystems. It takes care of syncing the file in case of O_SYNC file 3671 * and acquires i_mutex as needed. 3672 * Return: 3673 * * negative error code if no data has been written at all of 3674 * vfs_fsync_range() failed for a synchronous write 3675 * * number of bytes written, even for truncated writes 3676 */ 3677 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 3678 { 3679 struct file *file = iocb->ki_filp; 3680 struct inode *inode = file->f_mapping->host; 3681 ssize_t ret; 3682 3683 inode_lock(inode); 3684 ret = generic_write_checks(iocb, from); 3685 if (ret > 0) 3686 ret = __generic_file_write_iter(iocb, from); 3687 inode_unlock(inode); 3688 3689 if (ret > 0) 3690 ret = generic_write_sync(iocb, ret); 3691 return ret; 3692 } 3693 EXPORT_SYMBOL(generic_file_write_iter); 3694 3695 /** 3696 * try_to_release_page() - release old fs-specific metadata on a page 3697 * 3698 * @page: the page which the kernel is trying to free 3699 * @gfp_mask: memory allocation flags (and I/O mode) 3700 * 3701 * The address_space is to try to release any data against the page 3702 * (presumably at page->private). 3703 * 3704 * This may also be called if PG_fscache is set on a page, indicating that the 3705 * page is known to the local caching routines. 3706 * 3707 * The @gfp_mask argument specifies whether I/O may be performed to release 3708 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS). 3709 * 3710 * Return: %1 if the release was successful, otherwise return zero. 
3711 */ 3712 int try_to_release_page(struct page *page, gfp_t gfp_mask) 3713 { 3714 struct address_space * const mapping = page->mapping; 3715 3716 BUG_ON(!PageLocked(page)); 3717 if (PageWriteback(page)) 3718 return 0; 3719 3720 if (mapping && mapping->a_ops->releasepage) 3721 return mapping->a_ops->releasepage(page, gfp_mask); 3722 return try_to_free_buffers(page); 3723 } 3724 3725 EXPORT_SYMBOL(try_to_release_page); 3726