1 /* 2 * linux/mm/filemap.c 3 * 4 * Copyright (C) 1994-1999 Linus Torvalds 5 */ 6 7 /* 8 * This file handles the generic file mmap semantics used by 9 * most "normal" filesystems (but you don't /have/ to use this: 10 * the NFS filesystem used to do this differently, for example) 11 */ 12 #include <linux/export.h> 13 #include <linux/compiler.h> 14 #include <linux/dax.h> 15 #include <linux/fs.h> 16 #include <linux/sched/signal.h> 17 #include <linux/uaccess.h> 18 #include <linux/capability.h> 19 #include <linux/kernel_stat.h> 20 #include <linux/gfp.h> 21 #include <linux/mm.h> 22 #include <linux/swap.h> 23 #include <linux/mman.h> 24 #include <linux/pagemap.h> 25 #include <linux/file.h> 26 #include <linux/uio.h> 27 #include <linux/error-injection.h> 28 #include <linux/hash.h> 29 #include <linux/writeback.h> 30 #include <linux/backing-dev.h> 31 #include <linux/pagevec.h> 32 #include <linux/blkdev.h> 33 #include <linux/security.h> 34 #include <linux/cpuset.h> 35 #include <linux/hugetlb.h> 36 #include <linux/memcontrol.h> 37 #include <linux/cleancache.h> 38 #include <linux/shmem_fs.h> 39 #include <linux/rmap.h> 40 #include <linux/delayacct.h> 41 #include <linux/psi.h> 42 #include "internal.h" 43 44 #define CREATE_TRACE_POINTS 45 #include <trace/events/filemap.h> 46 47 /* 48 * FIXME: remove all knowledge of the buffer layer from the core VM 49 */ 50 #include <linux/buffer_head.h> /* for try_to_free_buffers */ 51 52 #include <asm/mman.h> 53 54 /* 55 * Shared mappings implemented 30.11.1994. It's not fully working yet, 56 * though. 57 * 58 * Shared mappings now work. 15.8.1995 Bruno. 59 * 60 * finished 'unifying' the page and buffer cache and SMP-threaded the 61 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com> 62 * 63 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de> 64 */ 65 66 /* 67 * Lock ordering: 68 * 69 * ->i_mmap_rwsem (truncate_pagecache) 70 * ->private_lock (__free_pte->__set_page_dirty_buffers) 71 * ->swap_lock (exclusive_swap_page, others) 72 * ->i_pages lock 73 * 74 * ->i_mutex 75 * ->i_mmap_rwsem (truncate->unmap_mapping_range) 76 * 77 * ->mmap_sem 78 * ->i_mmap_rwsem 79 * ->page_table_lock or pte_lock (various, mainly in memory.c) 80 * ->i_pages lock (arch-dependent flush_dcache_mmap_lock) 81 * 82 * ->mmap_sem 83 * ->lock_page (access_process_vm) 84 * 85 * ->i_mutex (generic_perform_write) 86 * ->mmap_sem (fault_in_pages_readable->do_page_fault) 87 * 88 * bdi->wb.list_lock 89 * sb_lock (fs/fs-writeback.c) 90 * ->i_pages lock (__sync_single_inode) 91 * 92 * ->i_mmap_rwsem 93 * ->anon_vma.lock (vma_adjust) 94 * 95 * ->anon_vma.lock 96 * ->page_table_lock or pte_lock (anon_vma_prepare and various) 97 * 98 * ->page_table_lock or pte_lock 99 * ->swap_lock (try_to_unmap_one) 100 * ->private_lock (try_to_unmap_one) 101 * ->i_pages lock (try_to_unmap_one) 102 * ->pgdat->lru_lock (follow_page->mark_page_accessed) 103 * ->pgdat->lru_lock (check_pte_range->isolate_lru_page) 104 * ->private_lock (page_remove_rmap->set_page_dirty) 105 * ->i_pages lock (page_remove_rmap->set_page_dirty) 106 * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) 107 * ->inode->i_lock (page_remove_rmap->set_page_dirty) 108 * ->memcg->move_lock (page_remove_rmap->lock_page_memcg) 109 * bdi.wb->list_lock (zap_pte_range->set_page_dirty) 110 * ->inode->i_lock (zap_pte_range->set_page_dirty) 111 * ->private_lock (zap_pte_range->__set_page_dirty_buffers) 112 * 113 * ->i_mmap_rwsem 114 * ->tasklist_lock (memory_failure, collect_procs_ao) 115 */ 116 117 static void page_cache_delete(struct 
address_space *mapping, 118 struct page *page, void *shadow) 119 { 120 XA_STATE(xas, &mapping->i_pages, page->index); 121 unsigned int nr = 1; 122 123 mapping_set_update(&xas, mapping); 124 125 /* hugetlb pages are represented by a single entry in the xarray */ 126 if (!PageHuge(page)) { 127 xas_set_order(&xas, page->index, compound_order(page)); 128 nr = 1U << compound_order(page); 129 } 130 131 VM_BUG_ON_PAGE(!PageLocked(page), page); 132 VM_BUG_ON_PAGE(PageTail(page), page); 133 VM_BUG_ON_PAGE(nr != 1 && shadow, page); 134 135 xas_store(&xas, shadow); 136 xas_init_marks(&xas); 137 138 page->mapping = NULL; 139 /* Leave page->index set: truncation lookup relies upon it */ 140 141 if (shadow) { 142 mapping->nrexceptional += nr; 143 /* 144 * Make sure the nrexceptional update is committed before 145 * the nrpages update so that final truncate racing 146 * with reclaim does not see both counters 0 at the 147 * same time and miss a shadow entry. 148 */ 149 smp_wmb(); 150 } 151 mapping->nrpages -= nr; 152 } 153 154 static void unaccount_page_cache_page(struct address_space *mapping, 155 struct page *page) 156 { 157 int nr; 158 159 /* 160 * if we're uptodate, flush out into the cleancache, otherwise 161 * invalidate any existing cleancache entries. We can't leave 162 * stale data around in the cleancache once our page is gone 163 */ 164 if (PageUptodate(page) && PageMappedToDisk(page)) 165 cleancache_put_page(page); 166 else 167 cleancache_invalidate_page(mapping, page); 168 169 VM_BUG_ON_PAGE(PageTail(page), page); 170 VM_BUG_ON_PAGE(page_mapped(page), page); 171 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { 172 int mapcount; 173 174 pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n", 175 current->comm, page_to_pfn(page)); 176 dump_page(page, "still mapped when deleted"); 177 dump_stack(); 178 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 179 180 mapcount = page_mapcount(page); 181 if (mapping_exiting(mapping) && 182 page_count(page) >= mapcount + 2) { 183 /* 184 * All vmas have already been torn down, so it's 185 * a good bet that actually the page is unmapped, 186 * and we'd prefer not to leak it: if we're wrong, 187 * some other bad page check should catch it later. 188 */ 189 page_mapcount_reset(page); 190 page_ref_sub(page, mapcount); 191 } 192 } 193 194 /* hugetlb pages do not participate in page cache accounting. */ 195 if (PageHuge(page)) 196 return; 197 198 nr = hpage_nr_pages(page); 199 200 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); 201 if (PageSwapBacked(page)) { 202 __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr); 203 if (PageTransHuge(page)) 204 __dec_node_page_state(page, NR_SHMEM_THPS); 205 } else { 206 VM_BUG_ON_PAGE(PageTransHuge(page), page); 207 } 208 209 /* 210 * At this point page must be either written or cleaned by 211 * truncate. Dirty page here signals a bug and loss of 212 * unwritten data. 213 * 214 * This fixes dirty accounting after removing the page entirely 215 * but leaves PageDirty set: it has no effect for truncated 216 * page and anyway will be cleared before returning page into 217 * buddy allocator. 218 */ 219 if (WARN_ON_ONCE(PageDirty(page))) 220 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); 221 } 222 223 /* 224 * Delete a page from the page cache and free it. Caller has to make 225 * sure the page is locked and that nobody else uses it - or that usage 226 * is safe. The caller must hold the i_pages lock. 
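 *
 * A minimal sketch of the expected calling sequence, assuming the caller
 * already holds a reference on the page (delete_from_page_cache() below is
 * the canonical user of this pattern):
 *
 *	lock_page(page);
 *	xa_lock_irqsave(&mapping->i_pages, flags);
 *	__delete_from_page_cache(page, NULL);
 *	xa_unlock_irqrestore(&mapping->i_pages, flags);
 *	page_cache_free_page(mapping, page);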
227 */ 228 void __delete_from_page_cache(struct page *page, void *shadow) 229 { 230 struct address_space *mapping = page->mapping; 231 232 trace_mm_filemap_delete_from_page_cache(page); 233 234 unaccount_page_cache_page(mapping, page); 235 page_cache_delete(mapping, page, shadow); 236 } 237 238 static void page_cache_free_page(struct address_space *mapping, 239 struct page *page) 240 { 241 void (*freepage)(struct page *); 242 243 freepage = mapping->a_ops->freepage; 244 if (freepage) 245 freepage(page); 246 247 if (PageTransHuge(page) && !PageHuge(page)) { 248 page_ref_sub(page, HPAGE_PMD_NR); 249 VM_BUG_ON_PAGE(page_count(page) <= 0, page); 250 } else { 251 put_page(page); 252 } 253 } 254 255 /** 256 * delete_from_page_cache - delete page from page cache 257 * @page: the page which the kernel is trying to remove from page cache 258 * 259 * This must be called only on pages that have been verified to be in the page 260 * cache and locked. It will never put the page into the free list, the caller 261 * has a reference on the page. 262 */ 263 void delete_from_page_cache(struct page *page) 264 { 265 struct address_space *mapping = page_mapping(page); 266 unsigned long flags; 267 268 BUG_ON(!PageLocked(page)); 269 xa_lock_irqsave(&mapping->i_pages, flags); 270 __delete_from_page_cache(page, NULL); 271 xa_unlock_irqrestore(&mapping->i_pages, flags); 272 273 page_cache_free_page(mapping, page); 274 } 275 EXPORT_SYMBOL(delete_from_page_cache); 276 277 /* 278 * page_cache_delete_batch - delete several pages from page cache 279 * @mapping: the mapping to which pages belong 280 * @pvec: pagevec with pages to delete 281 * 282 * The function walks over mapping->i_pages and removes pages passed in @pvec 283 * from the mapping. The function expects @pvec to be sorted by page index 284 * and is optimised for it to be dense. 285 * It tolerates holes in @pvec (mapping entries at those indices are not 286 * modified). The function expects only THP head pages to be present in the 287 * @pvec. 288 * 289 * The function expects the i_pages lock to be held. 290 */ 291 static void page_cache_delete_batch(struct address_space *mapping, 292 struct pagevec *pvec) 293 { 294 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); 295 int total_pages = 0; 296 int i = 0; 297 struct page *page; 298 299 mapping_set_update(&xas, mapping); 300 xas_for_each(&xas, page, ULONG_MAX) { 301 if (i >= pagevec_count(pvec)) 302 break; 303 304 /* A swap/dax/shadow entry got inserted? Skip it. */ 305 if (xa_is_value(page)) 306 continue; 307 /* 308 * A page got inserted in our range? Skip it. We have our 309 * pages locked so they are protected from being removed. 310 * If we see a page whose index is higher than ours, it 311 * means our page has been removed, which shouldn't be 312 * possible because we're holding the PageLock. 313 */ 314 if (page != pvec->pages[i]) { 315 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, 316 page); 317 continue; 318 } 319 320 WARN_ON_ONCE(!PageLocked(page)); 321 322 if (page->index == xas.xa_index) 323 page->mapping = NULL; 324 /* Leave page->index set: truncation lookup relies on it */ 325 326 /* 327 * Move to the next page in the vector if this is a regular 328 * page or the index is of the last sub-page of this compound 329 * page. 
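		 *
		 * For example (illustrative numbers only): a THP with
		 * compound_order 9 at page->index 512 covers indices
		 * 512..1023, so i is only advanced once xas.xa_index
		 * reaches 512 + (1UL << 9) - 1 == 1023.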
330 */ 331 if (page->index + (1UL << compound_order(page)) - 1 == 332 xas.xa_index) 333 i++; 334 xas_store(&xas, NULL); 335 total_pages++; 336 } 337 mapping->nrpages -= total_pages; 338 } 339 340 void delete_from_page_cache_batch(struct address_space *mapping, 341 struct pagevec *pvec) 342 { 343 int i; 344 unsigned long flags; 345 346 if (!pagevec_count(pvec)) 347 return; 348 349 xa_lock_irqsave(&mapping->i_pages, flags); 350 for (i = 0; i < pagevec_count(pvec); i++) { 351 trace_mm_filemap_delete_from_page_cache(pvec->pages[i]); 352 353 unaccount_page_cache_page(mapping, pvec->pages[i]); 354 } 355 page_cache_delete_batch(mapping, pvec); 356 xa_unlock_irqrestore(&mapping->i_pages, flags); 357 358 for (i = 0; i < pagevec_count(pvec); i++) 359 page_cache_free_page(mapping, pvec->pages[i]); 360 } 361 362 int filemap_check_errors(struct address_space *mapping) 363 { 364 int ret = 0; 365 /* Check for outstanding write errors */ 366 if (test_bit(AS_ENOSPC, &mapping->flags) && 367 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) 368 ret = -ENOSPC; 369 if (test_bit(AS_EIO, &mapping->flags) && 370 test_and_clear_bit(AS_EIO, &mapping->flags)) 371 ret = -EIO; 372 return ret; 373 } 374 EXPORT_SYMBOL(filemap_check_errors); 375 376 static int filemap_check_and_keep_errors(struct address_space *mapping) 377 { 378 /* Check for outstanding write errors */ 379 if (test_bit(AS_EIO, &mapping->flags)) 380 return -EIO; 381 if (test_bit(AS_ENOSPC, &mapping->flags)) 382 return -ENOSPC; 383 return 0; 384 } 385 386 /** 387 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range 388 * @mapping: address space structure to write 389 * @start: offset in bytes where the range starts 390 * @end: offset in bytes where the range ends (inclusive) 391 * @sync_mode: enable synchronous operation 392 * 393 * Start writeback against all of a mapping's dirty pages that lie 394 * within the byte offsets <start, end> inclusive. 395 * 396 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as 397 * opposed to a regular memory cleansing writeback. The difference between 398 * these two operations is that if a dirty page/buffer is encountered, it must 399 * be waited upon, and not just skipped over. 400 * 401 * Return: %0 on success, negative error code otherwise. 402 */ 403 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, 404 loff_t end, int sync_mode) 405 { 406 int ret; 407 struct writeback_control wbc = { 408 .sync_mode = sync_mode, 409 .nr_to_write = LONG_MAX, 410 .range_start = start, 411 .range_end = end, 412 }; 413 414 if (!mapping_cap_writeback_dirty(mapping)) 415 return 0; 416 417 wbc_attach_fdatawrite_inode(&wbc, mapping->host); 418 ret = do_writepages(mapping, &wbc); 419 wbc_detach_inode(&wbc); 420 return ret; 421 } 422 423 static inline int __filemap_fdatawrite(struct address_space *mapping, 424 int sync_mode) 425 { 426 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); 427 } 428 429 int filemap_fdatawrite(struct address_space *mapping) 430 { 431 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); 432 } 433 EXPORT_SYMBOL(filemap_fdatawrite); 434 435 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, 436 loff_t end) 437 { 438 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); 439 } 440 EXPORT_SYMBOL(filemap_fdatawrite_range); 441 442 /** 443 * filemap_flush - mostly a non-blocking flush 444 * @mapping: target address_space 445 * 446 * This is a mostly non-blocking flush. 
Not suitable for data-integrity 447 * purposes - I/O may not be started against all dirty pages. 448 * 449 * Return: %0 on success, negative error code otherwise. 450 */ 451 int filemap_flush(struct address_space *mapping) 452 { 453 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); 454 } 455 EXPORT_SYMBOL(filemap_flush); 456 457 /** 458 * filemap_range_has_page - check if a page exists in range. 459 * @mapping: address space within which to check 460 * @start_byte: offset in bytes where the range starts 461 * @end_byte: offset in bytes where the range ends (inclusive) 462 * 463 * Find at least one page in the range supplied, usually used to check if 464 * direct writing in this range will trigger a writeback. 465 * 466 * Return: %true if at least one page exists in the specified range, 467 * %false otherwise. 468 */ 469 bool filemap_range_has_page(struct address_space *mapping, 470 loff_t start_byte, loff_t end_byte) 471 { 472 struct page *page; 473 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); 474 pgoff_t max = end_byte >> PAGE_SHIFT; 475 476 if (end_byte < start_byte) 477 return false; 478 479 rcu_read_lock(); 480 for (;;) { 481 page = xas_find(&xas, max); 482 if (xas_retry(&xas, page)) 483 continue; 484 /* Shadow entries don't count */ 485 if (xa_is_value(page)) 486 continue; 487 /* 488 * We don't need to try to pin this page; we're about to 489 * release the RCU lock anyway. It is enough to know that 490 * there was a page here recently. 491 */ 492 break; 493 } 494 rcu_read_unlock(); 495 496 return page != NULL; 497 } 498 EXPORT_SYMBOL(filemap_range_has_page); 499 500 static void __filemap_fdatawait_range(struct address_space *mapping, 501 loff_t start_byte, loff_t end_byte) 502 { 503 pgoff_t index = start_byte >> PAGE_SHIFT; 504 pgoff_t end = end_byte >> PAGE_SHIFT; 505 struct pagevec pvec; 506 int nr_pages; 507 508 if (end_byte < start_byte) 509 return; 510 511 pagevec_init(&pvec); 512 while (index <= end) { 513 unsigned i; 514 515 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, 516 end, PAGECACHE_TAG_WRITEBACK); 517 if (!nr_pages) 518 break; 519 520 for (i = 0; i < nr_pages; i++) { 521 struct page *page = pvec.pages[i]; 522 523 wait_on_page_writeback(page); 524 ClearPageError(page); 525 } 526 pagevec_release(&pvec); 527 cond_resched(); 528 } 529 } 530 531 /** 532 * filemap_fdatawait_range - wait for writeback to complete 533 * @mapping: address space structure to wait for 534 * @start_byte: offset in bytes where the range starts 535 * @end_byte: offset in bytes where the range ends (inclusive) 536 * 537 * Walk the list of under-writeback pages of the given address space 538 * in the given range and wait for all of them. Check error status of 539 * the address space and return it. 540 * 541 * Since the error status of the address space is cleared by this function, 542 * callers are responsible for checking the return value and handling and/or 543 * reporting the error. 544 * 545 * Return: error status of the address space. 
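 *
 * A rough usage sketch (this is essentially what filemap_write_and_wait_range()
 * below does, with its error handling omitted):
 *
 *	__filemap_fdatawrite_range(mapping, lstart, lend, WB_SYNC_ALL);
 *	err = filemap_fdatawait_range(mapping, lstart, lend);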
546 */ 547 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, 548 loff_t end_byte) 549 { 550 __filemap_fdatawait_range(mapping, start_byte, end_byte); 551 return filemap_check_errors(mapping); 552 } 553 EXPORT_SYMBOL(filemap_fdatawait_range); 554 555 /** 556 * file_fdatawait_range - wait for writeback to complete 557 * @file: file pointing to address space structure to wait for 558 * @start_byte: offset in bytes where the range starts 559 * @end_byte: offset in bytes where the range ends (inclusive) 560 * 561 * Walk the list of under-writeback pages of the address space that file 562 * refers to, in the given range and wait for all of them. Check error 563 * status of the address space vs. the file->f_wb_err cursor and return it. 564 * 565 * Since the error status of the file is advanced by this function, 566 * callers are responsible for checking the return value and handling and/or 567 * reporting the error. 568 * 569 * Return: error status of the address space vs. the file->f_wb_err cursor. 570 */ 571 int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte) 572 { 573 struct address_space *mapping = file->f_mapping; 574 575 __filemap_fdatawait_range(mapping, start_byte, end_byte); 576 return file_check_and_advance_wb_err(file); 577 } 578 EXPORT_SYMBOL(file_fdatawait_range); 579 580 /** 581 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors 582 * @mapping: address space structure to wait for 583 * 584 * Walk the list of under-writeback pages of the given address space 585 * and wait for all of them. Unlike filemap_fdatawait(), this function 586 * does not clear error status of the address space. 587 * 588 * Use this function if callers don't handle errors themselves. Expected 589 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), 590 * fsfreeze(8) 591 * 592 * Return: error status of the address space. 593 */ 594 int filemap_fdatawait_keep_errors(struct address_space *mapping) 595 { 596 __filemap_fdatawait_range(mapping, 0, LLONG_MAX); 597 return filemap_check_and_keep_errors(mapping); 598 } 599 EXPORT_SYMBOL(filemap_fdatawait_keep_errors); 600 601 static bool mapping_needs_writeback(struct address_space *mapping) 602 { 603 return (!dax_mapping(mapping) && mapping->nrpages) || 604 (dax_mapping(mapping) && mapping->nrexceptional); 605 } 606 607 int filemap_write_and_wait(struct address_space *mapping) 608 { 609 int err = 0; 610 611 if (mapping_needs_writeback(mapping)) { 612 err = filemap_fdatawrite(mapping); 613 /* 614 * Even if the above returned error, the pages may be 615 * written partially (e.g. -ENOSPC), so we wait for it. 616 * But the -EIO is special case, it may indicate the worst 617 * thing (e.g. bug) happened, so we avoid waiting for it. 618 */ 619 if (err != -EIO) { 620 int err2 = filemap_fdatawait(mapping); 621 if (!err) 622 err = err2; 623 } else { 624 /* Clear any previously stored errors */ 625 filemap_check_errors(mapping); 626 } 627 } else { 628 err = filemap_check_errors(mapping); 629 } 630 return err; 631 } 632 EXPORT_SYMBOL(filemap_write_and_wait); 633 634 /** 635 * filemap_write_and_wait_range - write out & wait on a file range 636 * @mapping: the address_space for the pages 637 * @lstart: offset in bytes where the range starts 638 * @lend: offset in bytes where the range ends (inclusive) 639 * 640 * Write out and wait upon file offsets lstart->lend, inclusive. 
641 * 642 * Note that @lend is inclusive (describes the last byte to be written) so 643 * that this function can be used to write to the very end-of-file (end = -1). 644 * 645 * Return: error status of the address space. 646 */ 647 int filemap_write_and_wait_range(struct address_space *mapping, 648 loff_t lstart, loff_t lend) 649 { 650 int err = 0; 651 652 if (mapping_needs_writeback(mapping)) { 653 err = __filemap_fdatawrite_range(mapping, lstart, lend, 654 WB_SYNC_ALL); 655 /* See comment of filemap_write_and_wait() */ 656 if (err != -EIO) { 657 int err2 = filemap_fdatawait_range(mapping, 658 lstart, lend); 659 if (!err) 660 err = err2; 661 } else { 662 /* Clear any previously stored errors */ 663 filemap_check_errors(mapping); 664 } 665 } else { 666 err = filemap_check_errors(mapping); 667 } 668 return err; 669 } 670 EXPORT_SYMBOL(filemap_write_and_wait_range); 671 672 void __filemap_set_wb_err(struct address_space *mapping, int err) 673 { 674 errseq_t eseq = errseq_set(&mapping->wb_err, err); 675 676 trace_filemap_set_wb_err(mapping, eseq); 677 } 678 EXPORT_SYMBOL(__filemap_set_wb_err); 679 680 /** 681 * file_check_and_advance_wb_err - report wb error (if any) that was previously 682 * recorded and advance wb_err to current one 683 * @file: struct file on which the error is being reported 684 * 685 * When userland calls fsync (or something like nfsd does the equivalent), we 686 * want to report any writeback errors that occurred since the last fsync (or 687 * since the file was opened if there haven't been any). 688 * 689 * Grab the wb_err from the mapping. If it matches what we have in the file, 690 * then just quickly return 0. The file is all caught up. 691 * 692 * If it doesn't match, then take the mapping value, set the "seen" flag in 693 * it and try to swap it into place. If it works, or another task beat us 694 * to it with the new value, then update the f_wb_err and return the error 695 * portion. The error at this point must be reported via proper channels 696 * (a'la fsync, or NFS COMMIT operation, etc.). 697 * 698 * While we handle mapping->wb_err with atomic operations, the f_wb_err 699 * value is protected by the f_lock since we must ensure that it reflects 700 * the latest value swapped in for this file descriptor. 701 * 702 * Return: %0 on success, negative error code otherwise. 703 */ 704 int file_check_and_advance_wb_err(struct file *file) 705 { 706 int err = 0; 707 errseq_t old = READ_ONCE(file->f_wb_err); 708 struct address_space *mapping = file->f_mapping; 709 710 /* Locklessly handle the common case where nothing has changed */ 711 if (errseq_check(&mapping->wb_err, old)) { 712 /* Something changed, must use slow path */ 713 spin_lock(&file->f_lock); 714 old = file->f_wb_err; 715 err = errseq_check_and_advance(&mapping->wb_err, 716 &file->f_wb_err); 717 trace_file_check_and_advance_wb_err(file, old); 718 spin_unlock(&file->f_lock); 719 } 720 721 /* 722 * We're mostly using this function as a drop-in replacement for 723 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect 724 * that the legacy code would have had on these flags.
725 */ 726 clear_bit(AS_EIO, &mapping->flags); 727 clear_bit(AS_ENOSPC, &mapping->flags); 728 return err; 729 } 730 EXPORT_SYMBOL(file_check_and_advance_wb_err); 731 732 /** 733 * file_write_and_wait_range - write out & wait on a file range 734 * @file: file pointing to address_space with pages 735 * @lstart: offset in bytes where the range starts 736 * @lend: offset in bytes where the range ends (inclusive) 737 * 738 * Write out and wait upon file offsets lstart->lend, inclusive. 739 * 740 * Note that @lend is inclusive (describes the last byte to be written) so 741 * that this function can be used to write to the very end-of-file (end = -1). 742 * 743 * After writing out and waiting on the data, we check and advance the 744 * f_wb_err cursor to the latest value, and return any errors detected there. 745 * 746 * Return: %0 on success, negative error code otherwise. 747 */ 748 int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend) 749 { 750 int err = 0, err2; 751 struct address_space *mapping = file->f_mapping; 752 753 if (mapping_needs_writeback(mapping)) { 754 err = __filemap_fdatawrite_range(mapping, lstart, lend, 755 WB_SYNC_ALL); 756 /* See comment of filemap_write_and_wait() */ 757 if (err != -EIO) 758 __filemap_fdatawait_range(mapping, lstart, lend); 759 } 760 err2 = file_check_and_advance_wb_err(file); 761 if (!err) 762 err = err2; 763 return err; 764 } 765 EXPORT_SYMBOL(file_write_and_wait_range); 766 767 /** 768 * replace_page_cache_page - replace a pagecache page with a new one 769 * @old: page to be replaced 770 * @new: page to replace with 771 * @gfp_mask: allocation mode 772 * 773 * This function replaces a page in the pagecache with a new one. On 774 * success it acquires the pagecache reference for the new page and 775 * drops it for the old page. Both the old and new pages must be 776 * locked. This function does not add the new page to the LRU, the 777 * caller must do that. 778 * 779 * The remove + add is atomic. This function cannot fail. 780 * 781 * Return: %0 782 */ 783 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) 784 { 785 struct address_space *mapping = old->mapping; 786 void (*freepage)(struct page *) = mapping->a_ops->freepage; 787 pgoff_t offset = old->index; 788 XA_STATE(xas, &mapping->i_pages, offset); 789 unsigned long flags; 790 791 VM_BUG_ON_PAGE(!PageLocked(old), old); 792 VM_BUG_ON_PAGE(!PageLocked(new), new); 793 VM_BUG_ON_PAGE(new->mapping, new); 794 795 get_page(new); 796 new->mapping = mapping; 797 new->index = offset; 798 799 xas_lock_irqsave(&xas, flags); 800 xas_store(&xas, new); 801 802 old->mapping = NULL; 803 /* hugetlb pages do not participate in page cache accounting. 
*/ 804 if (!PageHuge(old)) 805 __dec_node_page_state(new, NR_FILE_PAGES); 806 if (!PageHuge(new)) 807 __inc_node_page_state(new, NR_FILE_PAGES); 808 if (PageSwapBacked(old)) 809 __dec_node_page_state(new, NR_SHMEM); 810 if (PageSwapBacked(new)) 811 __inc_node_page_state(new, NR_SHMEM); 812 xas_unlock_irqrestore(&xas, flags); 813 mem_cgroup_migrate(old, new); 814 if (freepage) 815 freepage(old); 816 put_page(old); 817 818 return 0; 819 } 820 EXPORT_SYMBOL_GPL(replace_page_cache_page); 821 822 static int __add_to_page_cache_locked(struct page *page, 823 struct address_space *mapping, 824 pgoff_t offset, gfp_t gfp_mask, 825 void **shadowp) 826 { 827 XA_STATE(xas, &mapping->i_pages, offset); 828 int huge = PageHuge(page); 829 struct mem_cgroup *memcg; 830 int error; 831 void *old; 832 833 VM_BUG_ON_PAGE(!PageLocked(page), page); 834 VM_BUG_ON_PAGE(PageSwapBacked(page), page); 835 mapping_set_update(&xas, mapping); 836 837 if (!huge) { 838 error = mem_cgroup_try_charge(page, current->mm, 839 gfp_mask, &memcg, false); 840 if (error) 841 return error; 842 } 843 844 get_page(page); 845 page->mapping = mapping; 846 page->index = offset; 847 848 do { 849 xas_lock_irq(&xas); 850 old = xas_load(&xas); 851 if (old && !xa_is_value(old)) 852 xas_set_err(&xas, -EEXIST); 853 xas_store(&xas, page); 854 if (xas_error(&xas)) 855 goto unlock; 856 857 if (xa_is_value(old)) { 858 mapping->nrexceptional--; 859 if (shadowp) 860 *shadowp = old; 861 } 862 mapping->nrpages++; 863 864 /* hugetlb pages do not participate in page cache accounting */ 865 if (!huge) 866 __inc_node_page_state(page, NR_FILE_PAGES); 867 unlock: 868 xas_unlock_irq(&xas); 869 } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK)); 870 871 if (xas_error(&xas)) 872 goto error; 873 874 if (!huge) 875 mem_cgroup_commit_charge(page, memcg, false, false); 876 trace_mm_filemap_add_to_page_cache(page); 877 return 0; 878 error: 879 page->mapping = NULL; 880 /* Leave page->index set: truncation relies upon it */ 881 if (!huge) 882 mem_cgroup_cancel_charge(page, memcg, false); 883 put_page(page); 884 return xas_error(&xas); 885 } 886 ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO); 887 888 /** 889 * add_to_page_cache_locked - add a locked page to the pagecache 890 * @page: page to add 891 * @mapping: the page's address_space 892 * @offset: page index 893 * @gfp_mask: page allocation mode 894 * 895 * This function is used to add a page to the pagecache. It must be locked. 896 * This function does not add the page to the LRU. The caller must do that. 897 * 898 * Return: %0 on success, negative error code otherwise. 899 */ 900 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, 901 pgoff_t offset, gfp_t gfp_mask) 902 { 903 return __add_to_page_cache_locked(page, mapping, offset, 904 gfp_mask, NULL); 905 } 906 EXPORT_SYMBOL(add_to_page_cache_locked); 907 908 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, 909 pgoff_t offset, gfp_t gfp_mask) 910 { 911 void *shadow = NULL; 912 int ret; 913 914 __SetPageLocked(page); 915 ret = __add_to_page_cache_locked(page, mapping, offset, 916 gfp_mask, &shadow); 917 if (unlikely(ret)) 918 __ClearPageLocked(page); 919 else { 920 /* 921 * The page might have been evicted from cache only 922 * recently, in which case it should be activated like 923 * any other repeatedly accessed page. 
924 * The exception is pages getting rewritten; evicting other 925 * data from the working set, only to cache data that will 926 * get overwritten with something else, is a waste of memory. 927 */ 928 WARN_ON_ONCE(PageActive(page)); 929 if (!(gfp_mask & __GFP_WRITE) && shadow) 930 workingset_refault(page, shadow); 931 lru_cache_add(page); 932 } 933 return ret; 934 } 935 EXPORT_SYMBOL_GPL(add_to_page_cache_lru); 936 937 #ifdef CONFIG_NUMA 938 struct page *__page_cache_alloc(gfp_t gfp) 939 { 940 int n; 941 struct page *page; 942 943 if (cpuset_do_page_mem_spread()) { 944 unsigned int cpuset_mems_cookie; 945 do { 946 cpuset_mems_cookie = read_mems_allowed_begin(); 947 n = cpuset_mem_spread_node(); 948 page = __alloc_pages_node(n, gfp, 0); 949 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); 950 951 return page; 952 } 953 return alloc_pages(gfp, 0); 954 } 955 EXPORT_SYMBOL(__page_cache_alloc); 956 #endif 957 958 /* 959 * In order to wait for pages to become available there must be 960 * waitqueues associated with pages. By using a hash table of 961 * waitqueues where the bucket discipline is to maintain all 962 * waiters on the same queue and wake all when any of the pages 963 * become available, and for the woken contexts to check to be 964 * sure the appropriate page became available, this saves space 965 * at a cost of "thundering herd" phenomena during rare hash 966 * collisions. 967 */ 968 #define PAGE_WAIT_TABLE_BITS 8 969 #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS) 970 static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned; 971 972 static wait_queue_head_t *page_waitqueue(struct page *page) 973 { 974 return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)]; 975 } 976 977 void __init pagecache_init(void) 978 { 979 int i; 980 981 for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++) 982 init_waitqueue_head(&page_wait_table[i]); 983 984 page_writeback_init(); 985 } 986 987 /* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */ 988 struct wait_page_key { 989 struct page *page; 990 int bit_nr; 991 int page_match; 992 }; 993 994 struct wait_page_queue { 995 struct page *page; 996 int bit_nr; 997 wait_queue_entry_t wait; 998 }; 999 1000 static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) 1001 { 1002 struct wait_page_key *key = arg; 1003 struct wait_page_queue *wait_page 1004 = container_of(wait, struct wait_page_queue, wait); 1005 1006 if (wait_page->page != key->page) 1007 return 0; 1008 key->page_match = 1; 1009 1010 if (wait_page->bit_nr != key->bit_nr) 1011 return 0; 1012 1013 /* 1014 * Stop walking if it's locked. 1015 * Is this safe if put_and_wait_on_page_locked() is in use? 1016 * Yes: the waker must hold a reference to this page, and if PG_locked 1017 * has now already been set by another task, that task must also hold 1018 * a reference to the *same usage* of this page; so there is no need 1019 * to walk on to wake even the put_and_wait_on_page_locked() callers. 
1020 */ 1021 if (test_bit(key->bit_nr, &key->page->flags)) 1022 return -1; 1023 1024 return autoremove_wake_function(wait, mode, sync, key); 1025 } 1026 1027 static void wake_up_page_bit(struct page *page, int bit_nr) 1028 { 1029 wait_queue_head_t *q = page_waitqueue(page); 1030 struct wait_page_key key; 1031 unsigned long flags; 1032 wait_queue_entry_t bookmark; 1033 1034 key.page = page; 1035 key.bit_nr = bit_nr; 1036 key.page_match = 0; 1037 1038 bookmark.flags = 0; 1039 bookmark.private = NULL; 1040 bookmark.func = NULL; 1041 INIT_LIST_HEAD(&bookmark.entry); 1042 1043 spin_lock_irqsave(&q->lock, flags); 1044 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); 1045 1046 while (bookmark.flags & WQ_FLAG_BOOKMARK) { 1047 /* 1048 * Take a breather from holding the lock, 1049 * allow pages that finish wake up asynchronously 1050 * to acquire the lock and remove themselves 1051 * from wait queue 1052 */ 1053 spin_unlock_irqrestore(&q->lock, flags); 1054 cpu_relax(); 1055 spin_lock_irqsave(&q->lock, flags); 1056 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); 1057 } 1058 1059 /* 1060 * It is possible for other pages to have collided on the waitqueue 1061 * hash, so in that case check for a page match. That prevents a long- 1062 * term waiter 1063 * 1064 * It is still possible to miss a case here, when we woke page waiters 1065 * and removed them from the waitqueue, but there are still other 1066 * page waiters. 1067 */ 1068 if (!waitqueue_active(q) || !key.page_match) { 1069 ClearPageWaiters(page); 1070 /* 1071 * It's possible to miss clearing Waiters here, when we woke 1072 * our page waiters, but the hashed waitqueue has waiters for 1073 * other pages on it. 1074 * 1075 * That's okay, it's a rare case. The next waker will clear it. 1076 */ 1077 } 1078 spin_unlock_irqrestore(&q->lock, flags); 1079 } 1080 1081 static void wake_up_page(struct page *page, int bit) 1082 { 1083 if (!PageWaiters(page)) 1084 return; 1085 wake_up_page_bit(page, bit); 1086 } 1087 1088 /* 1089 * A choice of three behaviors for wait_on_page_bit_common(): 1090 */ 1091 enum behavior { 1092 EXCLUSIVE, /* Hold ref to page and take the bit when woken, like 1093 * __lock_page() waiting on then setting PG_locked. 1094 */ 1095 SHARED, /* Hold ref to page and check the bit when woken, like 1096 * wait_on_page_writeback() waiting on PG_writeback. 1097 */ 1098 DROP, /* Drop ref to page before wait, no check when woken, 1099 * like put_and_wait_on_page_locked() on PG_locked. 1100 */ 1101 }; 1102 1103 static inline int wait_on_page_bit_common(wait_queue_head_t *q, 1104 struct page *page, int bit_nr, int state, enum behavior behavior) 1105 { 1106 struct wait_page_queue wait_page; 1107 wait_queue_entry_t *wait = &wait_page.wait; 1108 bool bit_is_set; 1109 bool thrashing = false; 1110 bool delayacct = false; 1111 unsigned long pflags; 1112 int ret = 0; 1113 1114 if (bit_nr == PG_locked && 1115 !PageUptodate(page) && PageWorkingset(page)) { 1116 if (!PageSwapBacked(page)) { 1117 delayacct_thrashing_start(); 1118 delayacct = true; 1119 } 1120 psi_memstall_enter(&pflags); 1121 thrashing = true; 1122 } 1123 1124 init_wait(wait); 1125 wait->flags = behavior == EXCLUSIVE ? 
WQ_FLAG_EXCLUSIVE : 0; 1126 wait->func = wake_page_function; 1127 wait_page.page = page; 1128 wait_page.bit_nr = bit_nr; 1129 1130 for (;;) { 1131 spin_lock_irq(&q->lock); 1132 1133 if (likely(list_empty(&wait->entry))) { 1134 __add_wait_queue_entry_tail(q, wait); 1135 SetPageWaiters(page); 1136 } 1137 1138 set_current_state(state); 1139 1140 spin_unlock_irq(&q->lock); 1141 1142 bit_is_set = test_bit(bit_nr, &page->flags); 1143 if (behavior == DROP) 1144 put_page(page); 1145 1146 if (likely(bit_is_set)) 1147 io_schedule(); 1148 1149 if (behavior == EXCLUSIVE) { 1150 if (!test_and_set_bit_lock(bit_nr, &page->flags)) 1151 break; 1152 } else if (behavior == SHARED) { 1153 if (!test_bit(bit_nr, &page->flags)) 1154 break; 1155 } 1156 1157 if (signal_pending_state(state, current)) { 1158 ret = -EINTR; 1159 break; 1160 } 1161 1162 if (behavior == DROP) { 1163 /* 1164 * We can no longer safely access page->flags: 1165 * even if CONFIG_MEMORY_HOTREMOVE is not enabled, 1166 * there is a risk of waiting forever on a page reused 1167 * for something that keeps it locked indefinitely. 1168 * But best check for -EINTR above before breaking. 1169 */ 1170 break; 1171 } 1172 } 1173 1174 finish_wait(q, wait); 1175 1176 if (thrashing) { 1177 if (delayacct) 1178 delayacct_thrashing_end(); 1179 psi_memstall_leave(&pflags); 1180 } 1181 1182 /* 1183 * A signal could leave PageWaiters set. Clearing it here if 1184 * !waitqueue_active would be possible (by open-coding finish_wait), 1185 * but still fail to catch it in the case of wait hash collision. We 1186 * already can fail to clear wait hash collision cases, so don't 1187 * bother with signals either. 1188 */ 1189 1190 return ret; 1191 } 1192 1193 void wait_on_page_bit(struct page *page, int bit_nr) 1194 { 1195 wait_queue_head_t *q = page_waitqueue(page); 1196 wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); 1197 } 1198 EXPORT_SYMBOL(wait_on_page_bit); 1199 1200 int wait_on_page_bit_killable(struct page *page, int bit_nr) 1201 { 1202 wait_queue_head_t *q = page_waitqueue(page); 1203 return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED); 1204 } 1205 EXPORT_SYMBOL(wait_on_page_bit_killable); 1206 1207 /** 1208 * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked 1209 * @page: The page to wait for. 1210 * 1211 * The caller should hold a reference on @page. They expect the page to 1212 * become unlocked relatively soon, but do not wish to hold up migration 1213 * (for example) by holding the reference while waiting for the page to 1214 * come unlocked. After this function returns, the caller should not 1215 * dereference @page. 1216 */ 1217 void put_and_wait_on_page_locked(struct page *page) 1218 { 1219 wait_queue_head_t *q; 1220 1221 page = compound_head(page); 1222 q = page_waitqueue(page); 1223 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP); 1224 } 1225 1226 /** 1227 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue 1228 * @page: Page defining the wait queue of interest 1229 * @waiter: Waiter to add to the queue 1230 * 1231 * Add an arbitrary @waiter to the wait queue for the nominated @page. 
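 *
 * An illustrative sketch of a caller supplying its own wake function
 * (my_wake_fn and my_cookie are made-up names for this sketch;
 * fs/cachefiles does something similar):
 *
 *	init_waitqueue_func_entry(&waiter, my_wake_fn);
 *	waiter.private = my_cookie;
 *	add_page_wait_queue(page, &waiter);
 *
 * The wake function is then invoked with a struct wait_page_key argument
 * when the page's bits are woken, and should check key->page and
 * key->bit_nr before acting.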
1232 */ 1233 void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) 1234 { 1235 wait_queue_head_t *q = page_waitqueue(page); 1236 unsigned long flags; 1237 1238 spin_lock_irqsave(&q->lock, flags); 1239 __add_wait_queue_entry_tail(q, waiter); 1240 SetPageWaiters(page); 1241 spin_unlock_irqrestore(&q->lock, flags); 1242 } 1243 EXPORT_SYMBOL_GPL(add_page_wait_queue); 1244 1245 #ifndef clear_bit_unlock_is_negative_byte 1246 1247 /* 1248 * PG_waiters is the high bit in the same byte as PG_locked. 1249 * 1250 * On x86 (and on many other architectures), we can clear PG_locked and 1251 * test the sign bit at the same time. But if the architecture does 1252 * not support that special operation, we just do this all by hand 1253 * instead. 1254 * 1255 * The read of PG_waiters has to be after (or concurrently with) PG_locked 1256 * being cleared, but a memory barrier should be unnecessary since it is 1257 * in the same byte as PG_locked. 1258 */ 1259 static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem) 1260 { 1261 clear_bit_unlock(nr, mem); 1262 /* smp_mb__after_atomic(); */ 1263 return test_bit(PG_waiters, mem); 1264 } 1265 1266 #endif 1267 1268 /** 1269 * unlock_page - unlock a locked page 1270 * @page: the page 1271 * 1272 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked(). 1273 * Also wakes sleepers in wait_on_page_writeback() because the wakeup 1274 * mechanism between PageLocked pages and PageWriteback pages is shared. 1275 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. 1276 * 1277 * Note that this depends on PG_waiters being the sign bit in the byte 1278 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to 1279 * clear the PG_locked bit and test PG_waiters at the same time fairly 1280 * portably (architectures that do LL/SC can test any bit, while x86 can 1281 * test the sign bit). 1282 */ 1283 void unlock_page(struct page *page) 1284 { 1285 BUILD_BUG_ON(PG_waiters != 7); 1286 page = compound_head(page); 1287 VM_BUG_ON_PAGE(!PageLocked(page), page); 1288 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) 1289 wake_up_page_bit(page, PG_locked); 1290 } 1291 EXPORT_SYMBOL(unlock_page); 1292 1293 /** 1294 * end_page_writeback - end writeback against a page 1295 * @page: the page 1296 */ 1297 void end_page_writeback(struct page *page) 1298 { 1299 /* 1300 * TestClearPageReclaim could be used here but it is an atomic 1301 * operation and overkill in this particular case. Failing to 1302 * shuffle a page marked for immediate reclaim is too mild to 1303 * justify taking an atomic operation penalty at the end of 1304 * every page writeback.
1305 */ 1306 if (PageReclaim(page)) { 1307 ClearPageReclaim(page); 1308 rotate_reclaimable_page(page); 1309 } 1310 1311 if (!test_clear_page_writeback(page)) 1312 BUG(); 1313 1314 smp_mb__after_atomic(); 1315 wake_up_page(page, PG_writeback); 1316 } 1317 EXPORT_SYMBOL(end_page_writeback); 1318 1319 /* 1320 * After completing I/O on a page, call this routine to update the page 1321 * flags appropriately 1322 */ 1323 void page_endio(struct page *page, bool is_write, int err) 1324 { 1325 if (!is_write) { 1326 if (!err) { 1327 SetPageUptodate(page); 1328 } else { 1329 ClearPageUptodate(page); 1330 SetPageError(page); 1331 } 1332 unlock_page(page); 1333 } else { 1334 if (err) { 1335 struct address_space *mapping; 1336 1337 SetPageError(page); 1338 mapping = page_mapping(page); 1339 if (mapping) 1340 mapping_set_error(mapping, err); 1341 } 1342 end_page_writeback(page); 1343 } 1344 } 1345 EXPORT_SYMBOL_GPL(page_endio); 1346 1347 /** 1348 * __lock_page - get a lock on the page, assuming we need to sleep to get it 1349 * @__page: the page to lock 1350 */ 1351 void __lock_page(struct page *__page) 1352 { 1353 struct page *page = compound_head(__page); 1354 wait_queue_head_t *q = page_waitqueue(page); 1355 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, 1356 EXCLUSIVE); 1357 } 1358 EXPORT_SYMBOL(__lock_page); 1359 1360 int __lock_page_killable(struct page *__page) 1361 { 1362 struct page *page = compound_head(__page); 1363 wait_queue_head_t *q = page_waitqueue(page); 1364 return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, 1365 EXCLUSIVE); 1366 } 1367 EXPORT_SYMBOL_GPL(__lock_page_killable); 1368 1369 /* 1370 * Return values: 1371 * 1 - page is locked; mmap_sem is still held. 1372 * 0 - page is not locked. 1373 * mmap_sem has been released (up_read()), unless flags had both 1374 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in 1375 * which case mmap_sem is still held. 1376 * 1377 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1 1378 * with the page locked and the mmap_sem unperturbed. 1379 */ 1380 int __lock_page_or_retry(struct page *page, struct mm_struct *mm, 1381 unsigned int flags) 1382 { 1383 if (flags & FAULT_FLAG_ALLOW_RETRY) { 1384 /* 1385 * CAUTION! In this case, mmap_sem is not released 1386 * even though return 0. 1387 */ 1388 if (flags & FAULT_FLAG_RETRY_NOWAIT) 1389 return 0; 1390 1391 up_read(&mm->mmap_sem); 1392 if (flags & FAULT_FLAG_KILLABLE) 1393 wait_on_page_locked_killable(page); 1394 else 1395 wait_on_page_locked(page); 1396 return 0; 1397 } else { 1398 if (flags & FAULT_FLAG_KILLABLE) { 1399 int ret; 1400 1401 ret = __lock_page_killable(page); 1402 if (ret) { 1403 up_read(&mm->mmap_sem); 1404 return 0; 1405 } 1406 } else 1407 __lock_page(page); 1408 return 1; 1409 } 1410 } 1411 1412 /** 1413 * page_cache_next_miss() - Find the next gap in the page cache. 1414 * @mapping: Mapping. 1415 * @index: Index. 1416 * @max_scan: Maximum range to search. 1417 * 1418 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the 1419 * gap with the lowest index. 1420 * 1421 * This function may be called under the rcu_read_lock. However, this will 1422 * not atomically search a snapshot of the cache at a single point in time. 1423 * For example, if a gap is created at index 5, then subsequently a gap is 1424 * created at index 10, page_cache_next_miss covering both indices may 1425 * return 10 if called under the rcu_read_lock. 
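 *
 * An illustrative caller, loosely modelled on the readahead code (the
 * variable names are made up for this sketch): look for the first hole
 * after index and bail out if it is not within max_scan entries:
 *
 *	rcu_read_lock();
 *	start = page_cache_next_miss(mapping, index + 1, max_scan);
 *	rcu_read_unlock();
 *	if (!start || start - index > max_scan)
 *		return 0;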
1426 * 1427 * Return: The index of the gap if found, otherwise an index outside the 1428 * range specified (in which case 'return - index >= max_scan' will be true). 1429 * In the rare case of index wrap-around, 0 will be returned. 1430 */ 1431 pgoff_t page_cache_next_miss(struct address_space *mapping, 1432 pgoff_t index, unsigned long max_scan) 1433 { 1434 XA_STATE(xas, &mapping->i_pages, index); 1435 1436 while (max_scan--) { 1437 void *entry = xas_next(&xas); 1438 if (!entry || xa_is_value(entry)) 1439 break; 1440 if (xas.xa_index == 0) 1441 break; 1442 } 1443 1444 return xas.xa_index; 1445 } 1446 EXPORT_SYMBOL(page_cache_next_miss); 1447 1448 /** 1449 * page_cache_prev_miss() - Find the previous gap in the page cache. 1450 * @mapping: Mapping. 1451 * @index: Index. 1452 * @max_scan: Maximum range to search. 1453 * 1454 * Search the range [max(index - max_scan + 1, 0), index] for the 1455 * gap with the highest index. 1456 * 1457 * This function may be called under the rcu_read_lock. However, this will 1458 * not atomically search a snapshot of the cache at a single point in time. 1459 * For example, if a gap is created at index 10, then subsequently a gap is 1460 * created at index 5, page_cache_prev_miss() covering both indices may 1461 * return 5 if called under the rcu_read_lock. 1462 * 1463 * Return: The index of the gap if found, otherwise an index outside the 1464 * range specified (in which case 'index - return >= max_scan' will be true). 1465 * In the rare case of wrap-around, ULONG_MAX will be returned. 1466 */ 1467 pgoff_t page_cache_prev_miss(struct address_space *mapping, 1468 pgoff_t index, unsigned long max_scan) 1469 { 1470 XA_STATE(xas, &mapping->i_pages, index); 1471 1472 while (max_scan--) { 1473 void *entry = xas_prev(&xas); 1474 if (!entry || xa_is_value(entry)) 1475 break; 1476 if (xas.xa_index == ULONG_MAX) 1477 break; 1478 } 1479 1480 return xas.xa_index; 1481 } 1482 EXPORT_SYMBOL(page_cache_prev_miss); 1483 1484 /** 1485 * find_get_entry - find and get a page cache entry 1486 * @mapping: the address_space to search 1487 * @offset: the page cache index 1488 * 1489 * Looks up the page cache slot at @mapping & @offset. If there is a 1490 * page cache page, it is returned with an increased refcount. 1491 * 1492 * If the slot holds a shadow entry of a previously evicted page, or a 1493 * swap entry from shmem/tmpfs, it is returned. 1494 * 1495 * Return: the found page or shadow entry, %NULL if nothing is found. 1496 */ 1497 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) 1498 { 1499 XA_STATE(xas, &mapping->i_pages, offset); 1500 struct page *page; 1501 1502 rcu_read_lock(); 1503 repeat: 1504 xas_reset(&xas); 1505 page = xas_load(&xas); 1506 if (xas_retry(&xas, page)) 1507 goto repeat; 1508 /* 1509 * A shadow entry of a recently evicted page, or a swap entry from 1510 * shmem/tmpfs. Return it without attempting to raise page count. 1511 */ 1512 if (!page || xa_is_value(page)) 1513 goto out; 1514 1515 if (!page_cache_get_speculative(page)) 1516 goto repeat; 1517 1518 /* 1519 * Has the page moved or been split? 1520 * This is part of the lockless pagecache protocol. See 1521 * include/linux/pagemap.h for details. 
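	 * In outline, the lockless protocol used here is:
	 *  1. load the entry under rcu_read_lock() (xas_load() above)
	 *  2. take a speculative reference with page_cache_get_speculative()
	 *  3. re-check with xas_reload() below that the slot still points at
	 *     the same page; if it moved, drop the reference and retry.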
1522 */ 1523 if (unlikely(page != xas_reload(&xas))) { 1524 put_page(page); 1525 goto repeat; 1526 } 1527 page = find_subpage(page, offset); 1528 out: 1529 rcu_read_unlock(); 1530 1531 return page; 1532 } 1533 EXPORT_SYMBOL(find_get_entry); 1534 1535 /** 1536 * find_lock_entry - locate, pin and lock a page cache entry 1537 * @mapping: the address_space to search 1538 * @offset: the page cache index 1539 * 1540 * Looks up the page cache slot at @mapping & @offset. If there is a 1541 * page cache page, it is returned locked and with an increased 1542 * refcount. 1543 * 1544 * If the slot holds a shadow entry of a previously evicted page, or a 1545 * swap entry from shmem/tmpfs, it is returned. 1546 * 1547 * find_lock_entry() may sleep. 1548 * 1549 * Return: the found page or shadow entry, %NULL if nothing is found. 1550 */ 1551 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) 1552 { 1553 struct page *page; 1554 1555 repeat: 1556 page = find_get_entry(mapping, offset); 1557 if (page && !xa_is_value(page)) { 1558 lock_page(page); 1559 /* Has the page been truncated? */ 1560 if (unlikely(page_mapping(page) != mapping)) { 1561 unlock_page(page); 1562 put_page(page); 1563 goto repeat; 1564 } 1565 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); 1566 } 1567 return page; 1568 } 1569 EXPORT_SYMBOL(find_lock_entry); 1570 1571 /** 1572 * pagecache_get_page - find and get a page reference 1573 * @mapping: the address_space to search 1574 * @offset: the page index 1575 * @fgp_flags: PCG flags 1576 * @gfp_mask: gfp mask to use for the page cache data page allocation 1577 * 1578 * Looks up the page cache slot at @mapping & @offset. 1579 * 1580 * PCG flags modify how the page is returned. 1581 * 1582 * @fgp_flags can be: 1583 * 1584 * - FGP_ACCESSED: the page will be marked accessed 1585 * - FGP_LOCK: Page is return locked 1586 * - FGP_CREAT: If page is not present then a new page is allocated using 1587 * @gfp_mask and added to the page cache and the VM's LRU 1588 * list. The page is returned locked and with an increased 1589 * refcount. 1590 * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do 1591 * its own locking dance if the page is already in cache, or unlock the page 1592 * before returning if we had to add the page to pagecache. 1593 * 1594 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even 1595 * if the GFP flags specified for FGP_CREAT are atomic. 1596 * 1597 * If there is a page cache page, it is returned with an increased refcount. 1598 * 1599 * Return: the found page or %NULL otherwise. 1600 */ 1601 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, 1602 int fgp_flags, gfp_t gfp_mask) 1603 { 1604 struct page *page; 1605 1606 repeat: 1607 page = find_get_entry(mapping, offset); 1608 if (xa_is_value(page)) 1609 page = NULL; 1610 if (!page) 1611 goto no_page; 1612 1613 if (fgp_flags & FGP_LOCK) { 1614 if (fgp_flags & FGP_NOWAIT) { 1615 if (!trylock_page(page)) { 1616 put_page(page); 1617 return NULL; 1618 } 1619 } else { 1620 lock_page(page); 1621 } 1622 1623 /* Has the page been truncated? 
*/ 1624 if (unlikely(page->mapping != mapping)) { 1625 unlock_page(page); 1626 put_page(page); 1627 goto repeat; 1628 } 1629 VM_BUG_ON_PAGE(page->index != offset, page); 1630 } 1631 1632 if (fgp_flags & FGP_ACCESSED) 1633 mark_page_accessed(page); 1634 1635 no_page: 1636 if (!page && (fgp_flags & FGP_CREAT)) { 1637 int err; 1638 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) 1639 gfp_mask |= __GFP_WRITE; 1640 if (fgp_flags & FGP_NOFS) 1641 gfp_mask &= ~__GFP_FS; 1642 1643 page = __page_cache_alloc(gfp_mask); 1644 if (!page) 1645 return NULL; 1646 1647 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP)))) 1648 fgp_flags |= FGP_LOCK; 1649 1650 /* Init accessed so avoid atomic mark_page_accessed later */ 1651 if (fgp_flags & FGP_ACCESSED) 1652 __SetPageReferenced(page); 1653 1654 err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); 1655 if (unlikely(err)) { 1656 put_page(page); 1657 page = NULL; 1658 if (err == -EEXIST) 1659 goto repeat; 1660 } 1661 1662 /* 1663 * add_to_page_cache_lru locks the page, and for mmap we expect 1664 * an unlocked page. 1665 */ 1666 if (page && (fgp_flags & FGP_FOR_MMAP)) 1667 unlock_page(page); 1668 } 1669 1670 return page; 1671 } 1672 EXPORT_SYMBOL(pagecache_get_page); 1673 1674 /** 1675 * find_get_entries - gang pagecache lookup 1676 * @mapping: The address_space to search 1677 * @start: The starting page cache index 1678 * @nr_entries: The maximum number of entries 1679 * @entries: Where the resulting entries are placed 1680 * @indices: The cache indices corresponding to the entries in @entries 1681 * 1682 * find_get_entries() will search for and return a group of up to 1683 * @nr_entries entries in the mapping. The entries are placed at 1684 * @entries. find_get_entries() takes a reference against any actual 1685 * pages it returns. 1686 * 1687 * The search returns a group of mapping-contiguous page cache entries 1688 * with ascending indexes. There may be holes in the indices due to 1689 * not-present pages. 1690 * 1691 * Any shadow entries of evicted pages, or swap entries from 1692 * shmem/tmpfs, are included in the returned array. 1693 * 1694 * Return: the number of pages and shadow entries which were found. 1695 */ 1696 unsigned find_get_entries(struct address_space *mapping, 1697 pgoff_t start, unsigned int nr_entries, 1698 struct page **entries, pgoff_t *indices) 1699 { 1700 XA_STATE(xas, &mapping->i_pages, start); 1701 struct page *page; 1702 unsigned int ret = 0; 1703 1704 if (!nr_entries) 1705 return 0; 1706 1707 rcu_read_lock(); 1708 xas_for_each(&xas, page, ULONG_MAX) { 1709 if (xas_retry(&xas, page)) 1710 continue; 1711 /* 1712 * A shadow entry of a recently evicted page, a swap 1713 * entry from shmem/tmpfs or a DAX entry. Return it 1714 * without attempting to raise page count. 1715 */ 1716 if (xa_is_value(page)) 1717 goto export; 1718 1719 if (!page_cache_get_speculative(page)) 1720 goto retry; 1721 1722 /* Has the page moved or been split? 
*/ 1723 if (unlikely(page != xas_reload(&xas))) 1724 goto put_page; 1725 page = find_subpage(page, xas.xa_index); 1726 1727 export: 1728 indices[ret] = xas.xa_index; 1729 entries[ret] = page; 1730 if (++ret == nr_entries) 1731 break; 1732 continue; 1733 put_page: 1734 put_page(page); 1735 retry: 1736 xas_reset(&xas); 1737 } 1738 rcu_read_unlock(); 1739 return ret; 1740 } 1741 1742 /** 1743 * find_get_pages_range - gang pagecache lookup 1744 * @mapping: The address_space to search 1745 * @start: The starting page index 1746 * @end: The final page index (inclusive) 1747 * @nr_pages: The maximum number of pages 1748 * @pages: Where the resulting pages are placed 1749 * 1750 * find_get_pages_range() will search for and return a group of up to @nr_pages 1751 * pages in the mapping starting at index @start and up to index @end 1752 * (inclusive). The pages are placed at @pages. find_get_pages_range() takes 1753 * a reference against the returned pages. 1754 * 1755 * The search returns a group of mapping-contiguous pages with ascending 1756 * indexes. There may be holes in the indices due to not-present pages. 1757 * We also update @start to index the next page for the traversal. 1758 * 1759 * Return: the number of pages which were found. If this number is 1760 * smaller than @nr_pages, the end of specified range has been 1761 * reached. 1762 */ 1763 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, 1764 pgoff_t end, unsigned int nr_pages, 1765 struct page **pages) 1766 { 1767 XA_STATE(xas, &mapping->i_pages, *start); 1768 struct page *page; 1769 unsigned ret = 0; 1770 1771 if (unlikely(!nr_pages)) 1772 return 0; 1773 1774 rcu_read_lock(); 1775 xas_for_each(&xas, page, end) { 1776 if (xas_retry(&xas, page)) 1777 continue; 1778 /* Skip over shadow, swap and DAX entries */ 1779 if (xa_is_value(page)) 1780 continue; 1781 1782 if (!page_cache_get_speculative(page)) 1783 goto retry; 1784 1785 /* Has the page moved or been split? */ 1786 if (unlikely(page != xas_reload(&xas))) 1787 goto put_page; 1788 1789 pages[ret] = find_subpage(page, xas.xa_index); 1790 if (++ret == nr_pages) { 1791 *start = xas.xa_index + 1; 1792 goto out; 1793 } 1794 continue; 1795 put_page: 1796 put_page(page); 1797 retry: 1798 xas_reset(&xas); 1799 } 1800 1801 /* 1802 * We come here when there is no page beyond @end. We take care to not 1803 * overflow the index @start as it confuses some of the callers. This 1804 * breaks the iteration when there is a page at index -1 but that is 1805 * already broken anyway. 1806 */ 1807 if (end == (pgoff_t)-1) 1808 *start = (pgoff_t)-1; 1809 else 1810 *start = end + 1; 1811 out: 1812 rcu_read_unlock(); 1813 1814 return ret; 1815 } 1816 1817 /** 1818 * find_get_pages_contig - gang contiguous pagecache lookup 1819 * @mapping: The address_space to search 1820 * @index: The starting page index 1821 * @nr_pages: The maximum number of pages 1822 * @pages: Where the resulting pages are placed 1823 * 1824 * find_get_pages_contig() works exactly like find_get_pages(), except 1825 * that the returned number of pages are guaranteed to be contiguous. 1826 * 1827 * Return: the number of pages which were found. 
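 *
 * A rough usage sketch (the caller owns a reference on each returned page
 * and must drop it; do_something() is an illustrative placeholder):
 *
 *	nr = find_get_pages_contig(mapping, index, ARRAY_SIZE(pages), pages);
 *	for (i = 0; i < nr; i++) {
 *		do_something(pages[i]);
 *		put_page(pages[i]);
 *	}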
1828 */ 1829 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, 1830 unsigned int nr_pages, struct page **pages) 1831 { 1832 XA_STATE(xas, &mapping->i_pages, index); 1833 struct page *page; 1834 unsigned int ret = 0; 1835 1836 if (unlikely(!nr_pages)) 1837 return 0; 1838 1839 rcu_read_lock(); 1840 for (page = xas_load(&xas); page; page = xas_next(&xas)) { 1841 if (xas_retry(&xas, page)) 1842 continue; 1843 /* 1844 * If the entry has been swapped out, we can stop looking. 1845 * No current caller is looking for DAX entries. 1846 */ 1847 if (xa_is_value(page)) 1848 break; 1849 1850 if (!page_cache_get_speculative(page)) 1851 goto retry; 1852 1853 /* Has the page moved or been split? */ 1854 if (unlikely(page != xas_reload(&xas))) 1855 goto put_page; 1856 1857 pages[ret] = find_subpage(page, xas.xa_index); 1858 if (++ret == nr_pages) 1859 break; 1860 continue; 1861 put_page: 1862 put_page(page); 1863 retry: 1864 xas_reset(&xas); 1865 } 1866 rcu_read_unlock(); 1867 return ret; 1868 } 1869 EXPORT_SYMBOL(find_get_pages_contig); 1870 1871 /** 1872 * find_get_pages_range_tag - find and return pages in given range matching @tag 1873 * @mapping: the address_space to search 1874 * @index: the starting page index 1875 * @end: The final page index (inclusive) 1876 * @tag: the tag index 1877 * @nr_pages: the maximum number of pages 1878 * @pages: where the resulting pages are placed 1879 * 1880 * Like find_get_pages, except we only return pages which are tagged with 1881 * @tag. We update @index to index the next page for the traversal. 1882 * 1883 * Return: the number of pages which were found. 1884 */ 1885 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, 1886 pgoff_t end, xa_mark_t tag, unsigned int nr_pages, 1887 struct page **pages) 1888 { 1889 XA_STATE(xas, &mapping->i_pages, *index); 1890 struct page *page; 1891 unsigned ret = 0; 1892 1893 if (unlikely(!nr_pages)) 1894 return 0; 1895 1896 rcu_read_lock(); 1897 xas_for_each_marked(&xas, page, end, tag) { 1898 if (xas_retry(&xas, page)) 1899 continue; 1900 /* 1901 * Shadow entries should never be tagged, but this iteration 1902 * is lockless so there is a window for page reclaim to evict 1903 * a page we saw tagged. Skip over it. 1904 */ 1905 if (xa_is_value(page)) 1906 continue; 1907 1908 if (!page_cache_get_speculative(page)) 1909 goto retry; 1910 1911 /* Has the page moved or been split? */ 1912 if (unlikely(page != xas_reload(&xas))) 1913 goto put_page; 1914 1915 pages[ret] = find_subpage(page, xas.xa_index); 1916 if (++ret == nr_pages) { 1917 *index = xas.xa_index + 1; 1918 goto out; 1919 } 1920 continue; 1921 put_page: 1922 put_page(page); 1923 retry: 1924 xas_reset(&xas); 1925 } 1926 1927 /* 1928 * We come here when we got to @end. We take care to not overflow the 1929 * index @index as it confuses some of the callers. This breaks the 1930 * iteration when there is a page at index -1 but that is already 1931 * broken anyway. 1932 */ 1933 if (end == (pgoff_t)-1) 1934 *index = (pgoff_t)-1; 1935 else 1936 *index = end + 1; 1937 out: 1938 rcu_read_unlock(); 1939 1940 return ret; 1941 } 1942 EXPORT_SYMBOL(find_get_pages_range_tag); 1943 1944 /* 1945 * CD/DVDs are error prone. When a medium error occurs, the driver may fail 1946 * a _large_ part of the i/o request. 
Imagine the worst scenario: 1947 * 1948 * ---R__________________________________________B__________ 1949 * ^ reading here ^ bad block(assume 4k) 1950 * 1951 * read(R) => miss => readahead(R...B) => media error => frustrating retries 1952 * => failing the whole request => read(R) => read(R+1) => 1953 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => 1954 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => 1955 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... 1956 * 1957 * It is going insane. Fix it by quickly scaling down the readahead size. 1958 */ 1959 static void shrink_readahead_size_eio(struct file *filp, 1960 struct file_ra_state *ra) 1961 { 1962 ra->ra_pages /= 4; 1963 } 1964 1965 /** 1966 * generic_file_buffered_read - generic file read routine 1967 * @iocb: the iocb to read 1968 * @iter: data destination 1969 * @written: already copied 1970 * 1971 * This is a generic file read routine, and uses the 1972 * mapping->a_ops->readpage() function for the actual low-level stuff. 1973 * 1974 * This is really ugly. But the goto's actually try to clarify some 1975 * of the logic when it comes to error handling etc. 1976 * 1977 * Return: 1978 * * total number of bytes copied, including those that were already @written 1979 * * negative error code if nothing was copied 1980 */ 1981 static ssize_t generic_file_buffered_read(struct kiocb *iocb, 1982 struct iov_iter *iter, ssize_t written) 1983 { 1984 struct file *filp = iocb->ki_filp; 1985 struct address_space *mapping = filp->f_mapping; 1986 struct inode *inode = mapping->host; 1987 struct file_ra_state *ra = &filp->f_ra; 1988 loff_t *ppos = &iocb->ki_pos; 1989 pgoff_t index; 1990 pgoff_t last_index; 1991 pgoff_t prev_index; 1992 unsigned long offset; /* offset into pagecache page */ 1993 unsigned int prev_offset; 1994 int error = 0; 1995 1996 if (unlikely(*ppos >= inode->i_sb->s_maxbytes)) 1997 return 0; 1998 iov_iter_truncate(iter, inode->i_sb->s_maxbytes); 1999 2000 index = *ppos >> PAGE_SHIFT; 2001 prev_index = ra->prev_pos >> PAGE_SHIFT; 2002 prev_offset = ra->prev_pos & (PAGE_SIZE-1); 2003 last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT; 2004 offset = *ppos & ~PAGE_MASK; 2005 2006 for (;;) { 2007 struct page *page; 2008 pgoff_t end_index; 2009 loff_t isize; 2010 unsigned long nr, ret; 2011 2012 cond_resched(); 2013 find_page: 2014 if (fatal_signal_pending(current)) { 2015 error = -EINTR; 2016 goto out; 2017 } 2018 2019 page = find_get_page(mapping, index); 2020 if (!page) { 2021 if (iocb->ki_flags & IOCB_NOWAIT) 2022 goto would_block; 2023 page_cache_sync_readahead(mapping, 2024 ra, filp, 2025 index, last_index - index); 2026 page = find_get_page(mapping, index); 2027 if (unlikely(page == NULL)) 2028 goto no_cached_page; 2029 } 2030 if (PageReadahead(page)) { 2031 page_cache_async_readahead(mapping, 2032 ra, filp, page, 2033 index, last_index - index); 2034 } 2035 if (!PageUptodate(page)) { 2036 if (iocb->ki_flags & IOCB_NOWAIT) { 2037 put_page(page); 2038 goto would_block; 2039 } 2040 2041 /* 2042 * See comment in do_read_cache_page on why 2043 * wait_on_page_locked is used to avoid unnecessary 2044 * serialisations and why it's safe.
2045 */ 2046 error = wait_on_page_locked_killable(page); 2047 if (unlikely(error)) 2048 goto readpage_error; 2049 if (PageUptodate(page)) 2050 goto page_ok; 2051 2052 if (inode->i_blkbits == PAGE_SHIFT || 2053 !mapping->a_ops->is_partially_uptodate) 2054 goto page_not_up_to_date; 2055 /* pipes can't handle partially uptodate pages */ 2056 if (unlikely(iov_iter_is_pipe(iter))) 2057 goto page_not_up_to_date; 2058 if (!trylock_page(page)) 2059 goto page_not_up_to_date; 2060 /* Did it get truncated before we got the lock? */ 2061 if (!page->mapping) 2062 goto page_not_up_to_date_locked; 2063 if (!mapping->a_ops->is_partially_uptodate(page, 2064 offset, iter->count)) 2065 goto page_not_up_to_date_locked; 2066 unlock_page(page); 2067 } 2068 page_ok: 2069 /* 2070 * i_size must be checked after we know the page is Uptodate. 2071 * 2072 * Checking i_size after the check allows us to calculate 2073 * the correct value for "nr", which means the zero-filled 2074 * part of the page is not copied back to userspace (unless 2075 * another truncate extends the file - this is desired though). 2076 */ 2077 2078 isize = i_size_read(inode); 2079 end_index = (isize - 1) >> PAGE_SHIFT; 2080 if (unlikely(!isize || index > end_index)) { 2081 put_page(page); 2082 goto out; 2083 } 2084 2085 /* nr is the maximum number of bytes to copy from this page */ 2086 nr = PAGE_SIZE; 2087 if (index == end_index) { 2088 nr = ((isize - 1) & ~PAGE_MASK) + 1; 2089 if (nr <= offset) { 2090 put_page(page); 2091 goto out; 2092 } 2093 } 2094 nr = nr - offset; 2095 2096 /* If users can be writing to this page using arbitrary 2097 * virtual addresses, take care about potential aliasing 2098 * before reading the page on the kernel side. 2099 */ 2100 if (mapping_writably_mapped(mapping)) 2101 flush_dcache_page(page); 2102 2103 /* 2104 * When a sequential read accesses a page several times, 2105 * only mark it as accessed the first time. 2106 */ 2107 if (prev_index != index || offset != prev_offset) 2108 mark_page_accessed(page); 2109 prev_index = index; 2110 2111 /* 2112 * Ok, we have the page, and it's up-to-date, so 2113 * now we can copy it to user space... 2114 */ 2115 2116 ret = copy_page_to_iter(page, offset, nr, iter); 2117 offset += ret; 2118 index += offset >> PAGE_SHIFT; 2119 offset &= ~PAGE_MASK; 2120 prev_offset = offset; 2121 2122 put_page(page); 2123 written += ret; 2124 if (!iov_iter_count(iter)) 2125 goto out; 2126 if (ret < nr) { 2127 error = -EFAULT; 2128 goto out; 2129 } 2130 continue; 2131 2132 page_not_up_to_date: 2133 /* Get exclusive access to the page ... */ 2134 error = lock_page_killable(page); 2135 if (unlikely(error)) 2136 goto readpage_error; 2137 2138 page_not_up_to_date_locked: 2139 /* Did it get truncated before we got the lock? */ 2140 if (!page->mapping) { 2141 unlock_page(page); 2142 put_page(page); 2143 continue; 2144 } 2145 2146 /* Did somebody else fill it already? */ 2147 if (PageUptodate(page)) { 2148 unlock_page(page); 2149 goto page_ok; 2150 } 2151 2152 readpage: 2153 /* 2154 * A previous I/O error may have been due to temporary 2155 * failures, eg. multipath errors. 2156 * PG_error will be set again if readpage fails. 2157 */ 2158 ClearPageError(page); 2159 /* Start the actual read. The read will unlock the page. 
*/ 2160 error = mapping->a_ops->readpage(filp, page); 2161 2162 if (unlikely(error)) { 2163 if (error == AOP_TRUNCATED_PAGE) { 2164 put_page(page); 2165 error = 0; 2166 goto find_page; 2167 } 2168 goto readpage_error; 2169 } 2170 2171 if (!PageUptodate(page)) { 2172 error = lock_page_killable(page); 2173 if (unlikely(error)) 2174 goto readpage_error; 2175 if (!PageUptodate(page)) { 2176 if (page->mapping == NULL) { 2177 /* 2178 * invalidate_mapping_pages got it 2179 */ 2180 unlock_page(page); 2181 put_page(page); 2182 goto find_page; 2183 } 2184 unlock_page(page); 2185 shrink_readahead_size_eio(filp, ra); 2186 error = -EIO; 2187 goto readpage_error; 2188 } 2189 unlock_page(page); 2190 } 2191 2192 goto page_ok; 2193 2194 readpage_error: 2195 /* UHHUH! A synchronous read error occurred. Report it */ 2196 put_page(page); 2197 goto out; 2198 2199 no_cached_page: 2200 /* 2201 * Ok, it wasn't cached, so we need to create a new 2202 * page.. 2203 */ 2204 page = page_cache_alloc(mapping); 2205 if (!page) { 2206 error = -ENOMEM; 2207 goto out; 2208 } 2209 error = add_to_page_cache_lru(page, mapping, index, 2210 mapping_gfp_constraint(mapping, GFP_KERNEL)); 2211 if (error) { 2212 put_page(page); 2213 if (error == -EEXIST) { 2214 error = 0; 2215 goto find_page; 2216 } 2217 goto out; 2218 } 2219 goto readpage; 2220 } 2221 2222 would_block: 2223 error = -EAGAIN; 2224 out: 2225 ra->prev_pos = prev_index; 2226 ra->prev_pos <<= PAGE_SHIFT; 2227 ra->prev_pos |= prev_offset; 2228 2229 *ppos = ((loff_t)index << PAGE_SHIFT) + offset; 2230 file_accessed(filp); 2231 return written ? written : error; 2232 } 2233 2234 /** 2235 * generic_file_read_iter - generic filesystem read routine 2236 * @iocb: kernel I/O control block 2237 * @iter: destination for the data read 2238 * 2239 * This is the "read_iter()" routine for all filesystems 2240 * that can use the page cache directly. 2241 * Return: 2242 * * number of bytes copied, even for partial reads 2243 * * negative error code if nothing was read 2244 */ 2245 ssize_t 2246 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 2247 { 2248 size_t count = iov_iter_count(iter); 2249 ssize_t retval = 0; 2250 2251 if (!count) 2252 goto out; /* skip atime */ 2253 2254 if (iocb->ki_flags & IOCB_DIRECT) { 2255 struct file *file = iocb->ki_filp; 2256 struct address_space *mapping = file->f_mapping; 2257 struct inode *inode = mapping->host; 2258 loff_t size; 2259 2260 size = i_size_read(inode); 2261 if (iocb->ki_flags & IOCB_NOWAIT) { 2262 if (filemap_range_has_page(mapping, iocb->ki_pos, 2263 iocb->ki_pos + count - 1)) 2264 return -EAGAIN; 2265 } else { 2266 retval = filemap_write_and_wait_range(mapping, 2267 iocb->ki_pos, 2268 iocb->ki_pos + count - 1); 2269 if (retval < 0) 2270 goto out; 2271 } 2272 2273 file_accessed(file); 2274 2275 retval = mapping->a_ops->direct_IO(iocb, iter); 2276 if (retval >= 0) { 2277 iocb->ki_pos += retval; 2278 count -= retval; 2279 } 2280 iov_iter_revert(iter, count - iov_iter_count(iter)); 2281 2282 /* 2283 * Btrfs can have a short DIO read if we encounter 2284 * compressed extents, so if there was an error, or if 2285 * we've already read everything we wanted to, or if 2286 * there was a short read because we hit EOF, go ahead 2287 * and return. Otherwise fallthrough to buffered io for 2288 * the rest of the read. Buffered reads will not work for 2289 * DAX files, so don't bother trying. 
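 *
 * Illustrative example with made-up numbers, not from the original
 * source: if the caller asked for 1 MiB, ->direct_IO() returned 256 KiB
 * and we are not at EOF, then ki_pos has advanced by 256 KiB, count is
 * now 768 KiB, and iov_iter_revert() has wound the iterator back so the
 * buffered path below picks up the remaining 768 KiB from the new
 * position.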
2290 */ 2291 if (retval < 0 || !count || iocb->ki_pos >= size || 2292 IS_DAX(inode)) 2293 goto out; 2294 } 2295 2296 retval = generic_file_buffered_read(iocb, iter, retval); 2297 out: 2298 return retval; 2299 } 2300 EXPORT_SYMBOL(generic_file_read_iter); 2301 2302 #ifdef CONFIG_MMU 2303 #define MMAP_LOTSAMISS (100) 2304 static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, 2305 struct file *fpin) 2306 { 2307 int flags = vmf->flags; 2308 2309 if (fpin) 2310 return fpin; 2311 2312 /* 2313 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or 2314 * anything, so we only pin the file and drop the mmap_sem if only 2315 * FAULT_FLAG_ALLOW_RETRY is set. 2316 */ 2317 if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) == 2318 FAULT_FLAG_ALLOW_RETRY) { 2319 fpin = get_file(vmf->vma->vm_file); 2320 up_read(&vmf->vma->vm_mm->mmap_sem); 2321 } 2322 return fpin; 2323 } 2324 2325 /* 2326 * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem 2327 * @vmf - the vm_fault for this fault. 2328 * @page - the page to lock. 2329 * @fpin - the pointer to the file we may pin (or is already pinned). 2330 * 2331 * This works similar to lock_page_or_retry in that it can drop the mmap_sem. 2332 * It differs in that it actually returns the page locked if it returns 1 and 0 2333 * if it couldn't lock the page. If we did have to drop the mmap_sem then fpin 2334 * will point to the pinned file and needs to be fput()'ed at a later point. 2335 */ 2336 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, 2337 struct file **fpin) 2338 { 2339 if (trylock_page(page)) 2340 return 1; 2341 2342 /* 2343 * NOTE! This will make us return with VM_FAULT_RETRY, but with 2344 * the mmap_sem still held. That's how FAULT_FLAG_RETRY_NOWAIT 2345 * is supposed to work. We have way too many special cases.. 2346 */ 2347 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) 2348 return 0; 2349 2350 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); 2351 if (vmf->flags & FAULT_FLAG_KILLABLE) { 2352 if (__lock_page_killable(page)) { 2353 /* 2354 * We didn't have the right flags to drop the mmap_sem, 2355 * but all fault_handlers only check for fatal signals 2356 * if we return VM_FAULT_RETRY, so we need to drop the 2357 * mmap_sem here and return 0 if we don't have a fpin. 2358 */ 2359 if (*fpin == NULL) 2360 up_read(&vmf->vma->vm_mm->mmap_sem); 2361 return 0; 2362 } 2363 } else 2364 __lock_page(page); 2365 return 1; 2366 } 2367 2368 2369 /* 2370 * Synchronous readahead happens when we don't even find a page in the page 2371 * cache at all. We don't want to perform IO under the mmap sem, so if we have 2372 * to drop the mmap sem we return the file that was pinned in order for us to do 2373 * that. If we didn't pin a file then we return NULL. The file that is 2374 * returned needs to be fput()'ed when we're done with it. 
2375 */ 2376 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) 2377 { 2378 struct file *file = vmf->vma->vm_file; 2379 struct file_ra_state *ra = &file->f_ra; 2380 struct address_space *mapping = file->f_mapping; 2381 struct file *fpin = NULL; 2382 pgoff_t offset = vmf->pgoff; 2383 2384 /* If we don't want any read-ahead, don't bother */ 2385 if (vmf->vma->vm_flags & VM_RAND_READ) 2386 return fpin; 2387 if (!ra->ra_pages) 2388 return fpin; 2389 2390 if (vmf->vma->vm_flags & VM_SEQ_READ) { 2391 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 2392 page_cache_sync_readahead(mapping, ra, file, offset, 2393 ra->ra_pages); 2394 return fpin; 2395 } 2396 2397 /* Avoid banging the cache line if not needed */ 2398 if (ra->mmap_miss < MMAP_LOTSAMISS * 10) 2399 ra->mmap_miss++; 2400 2401 /* 2402 * Do we miss much more than hit in this file? If so, 2403 * stop bothering with read-ahead. It will only hurt. 2404 */ 2405 if (ra->mmap_miss > MMAP_LOTSAMISS) 2406 return fpin; 2407 2408 /* 2409 * mmap read-around 2410 */ 2411 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 2412 ra->start = max_t(long, 0, offset - ra->ra_pages / 2); 2413 ra->size = ra->ra_pages; 2414 ra->async_size = ra->ra_pages / 4; 2415 ra_submit(ra, mapping, file); 2416 return fpin; 2417 } 2418 2419 /* 2420 * Asynchronous readahead happens when we find the page and PG_readahead, 2421 * so we want to possibly extend the readahead further. We return the file that 2422 * was pinned if we have to drop the mmap_sem in order to do IO. 2423 */ 2424 static struct file *do_async_mmap_readahead(struct vm_fault *vmf, 2425 struct page *page) 2426 { 2427 struct file *file = vmf->vma->vm_file; 2428 struct file_ra_state *ra = &file->f_ra; 2429 struct address_space *mapping = file->f_mapping; 2430 struct file *fpin = NULL; 2431 pgoff_t offset = vmf->pgoff; 2432 2433 /* If we don't want any read-ahead, don't bother */ 2434 if (vmf->vma->vm_flags & VM_RAND_READ) 2435 return fpin; 2436 if (ra->mmap_miss > 0) 2437 ra->mmap_miss--; 2438 if (PageReadahead(page)) { 2439 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 2440 page_cache_async_readahead(mapping, ra, file, 2441 page, offset, ra->ra_pages); 2442 } 2443 return fpin; 2444 } 2445 2446 /** 2447 * filemap_fault - read in file data for page fault handling 2448 * @vmf: struct vm_fault containing details of the fault 2449 * 2450 * filemap_fault() is invoked via the vma operations vector for a 2451 * mapped memory region to read in file data during a page fault. 2452 * 2453 * The goto's are kind of ugly, but this streamlines the normal case of having 2454 * it in the page cache, and handles the special cases reasonably without 2455 * having a lot of duplicated code. 2456 * 2457 * vma->vm_mm->mmap_sem must be held on entry. 2458 * 2459 * If our return value has VM_FAULT_RETRY set, it's because 2460 * lock_page_or_retry() returned 0. 2461 * The mmap_sem has usually been released in this case. 2462 * See __lock_page_or_retry() for the exception. 2463 * 2464 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem 2465 * has not been released. 2466 * 2467 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. 2468 * 2469 * Return: bitwise-OR of %VM_FAULT_ codes. 
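 *
 * Illustrative sketch only, mirroring generic_file_vm_ops further down in
 * this file ("example_vm_ops" and "example_file_mmap" are hypothetical
 * names): a filesystem normally exposes this handler through the
 * vm_operations_struct it installs from its ->mmap() method:
 *
 *	static const struct vm_operations_struct example_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= filemap_page_mkwrite,
 *	};
 *
 *	static int example_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		file_accessed(file);
 *		vma->vm_ops = &example_vm_ops;
 *		return 0;
 *	}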
2470 */ 2471 vm_fault_t filemap_fault(struct vm_fault *vmf) 2472 { 2473 int error; 2474 struct file *file = vmf->vma->vm_file; 2475 struct file *fpin = NULL; 2476 struct address_space *mapping = file->f_mapping; 2477 struct file_ra_state *ra = &file->f_ra; 2478 struct inode *inode = mapping->host; 2479 pgoff_t offset = vmf->pgoff; 2480 pgoff_t max_off; 2481 struct page *page; 2482 vm_fault_t ret = 0; 2483 2484 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2485 if (unlikely(offset >= max_off)) 2486 return VM_FAULT_SIGBUS; 2487 2488 /* 2489 * Do we have something in the page cache already? 2490 */ 2491 page = find_get_page(mapping, offset); 2492 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { 2493 /* 2494 * We found the page, so try async readahead before 2495 * waiting for the lock. 2496 */ 2497 fpin = do_async_mmap_readahead(vmf, page); 2498 } else if (!page) { 2499 /* No page in the page cache at all */ 2500 count_vm_event(PGMAJFAULT); 2501 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); 2502 ret = VM_FAULT_MAJOR; 2503 fpin = do_sync_mmap_readahead(vmf); 2504 retry_find: 2505 page = pagecache_get_page(mapping, offset, 2506 FGP_CREAT|FGP_FOR_MMAP, 2507 vmf->gfp_mask); 2508 if (!page) { 2509 if (fpin) 2510 goto out_retry; 2511 return vmf_error(-ENOMEM); 2512 } 2513 } 2514 2515 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) 2516 goto out_retry; 2517 2518 /* Did it get truncated? */ 2519 if (unlikely(page->mapping != mapping)) { 2520 unlock_page(page); 2521 put_page(page); 2522 goto retry_find; 2523 } 2524 VM_BUG_ON_PAGE(page->index != offset, page); 2525 2526 /* 2527 * We have a locked page in the page cache, now we need to check 2528 * that it's up-to-date. If not, it is going to be due to an error. 2529 */ 2530 if (unlikely(!PageUptodate(page))) 2531 goto page_not_uptodate; 2532 2533 /* 2534 * We've made it this far and we had to drop our mmap_sem, now is the 2535 * time to return to the upper layer and have it re-find the vma and 2536 * redo the fault. 2537 */ 2538 if (fpin) { 2539 unlock_page(page); 2540 goto out_retry; 2541 } 2542 2543 /* 2544 * Found the page and have a reference on it. 2545 * We must recheck i_size under page lock. 2546 */ 2547 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2548 if (unlikely(offset >= max_off)) { 2549 unlock_page(page); 2550 put_page(page); 2551 return VM_FAULT_SIGBUS; 2552 } 2553 2554 vmf->page = page; 2555 return ret | VM_FAULT_LOCKED; 2556 2557 page_not_uptodate: 2558 /* 2559 * Umm, take care of errors if the page isn't up-to-date. 2560 * Try to re-read it _once_. We do this synchronously, 2561 * because there really aren't any performance issues here 2562 * and we need to check for errors. 2563 */ 2564 ClearPageError(page); 2565 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 2566 error = mapping->a_ops->readpage(file, page); 2567 if (!error) { 2568 wait_on_page_locked(page); 2569 if (!PageUptodate(page)) 2570 error = -EIO; 2571 } 2572 if (fpin) 2573 goto out_retry; 2574 put_page(page); 2575 2576 if (!error || error == AOP_TRUNCATED_PAGE) 2577 goto retry_find; 2578 2579 /* Things didn't work out. Return zero to tell the mm layer so. */ 2580 shrink_readahead_size_eio(file, ra); 2581 return VM_FAULT_SIGBUS; 2582 2583 out_retry: 2584 /* 2585 * We dropped the mmap_sem, we need to return to the fault handler to 2586 * re-find the vma and come back and find our hopefully still populated 2587 * page. 
2588 */ 2589 if (page) 2590 put_page(page); 2591 if (fpin) 2592 fput(fpin); 2593 return ret | VM_FAULT_RETRY; 2594 } 2595 EXPORT_SYMBOL(filemap_fault); 2596 2597 void filemap_map_pages(struct vm_fault *vmf, 2598 pgoff_t start_pgoff, pgoff_t end_pgoff) 2599 { 2600 struct file *file = vmf->vma->vm_file; 2601 struct address_space *mapping = file->f_mapping; 2602 pgoff_t last_pgoff = start_pgoff; 2603 unsigned long max_idx; 2604 XA_STATE(xas, &mapping->i_pages, start_pgoff); 2605 struct page *page; 2606 2607 rcu_read_lock(); 2608 xas_for_each(&xas, page, end_pgoff) { 2609 if (xas_retry(&xas, page)) 2610 continue; 2611 if (xa_is_value(page)) 2612 goto next; 2613 2614 /* 2615 * Check for a locked page first, as a speculative 2616 * reference may adversely influence page migration. 2617 */ 2618 if (PageLocked(page)) 2619 goto next; 2620 if (!page_cache_get_speculative(page)) 2621 goto next; 2622 2623 /* Has the page moved or been split? */ 2624 if (unlikely(page != xas_reload(&xas))) 2625 goto skip; 2626 page = find_subpage(page, xas.xa_index); 2627 2628 if (!PageUptodate(page) || 2629 PageReadahead(page) || 2630 PageHWPoison(page)) 2631 goto skip; 2632 if (!trylock_page(page)) 2633 goto skip; 2634 2635 if (page->mapping != mapping || !PageUptodate(page)) 2636 goto unlock; 2637 2638 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 2639 if (page->index >= max_idx) 2640 goto unlock; 2641 2642 if (file->f_ra.mmap_miss > 0) 2643 file->f_ra.mmap_miss--; 2644 2645 vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT; 2646 if (vmf->pte) 2647 vmf->pte += xas.xa_index - last_pgoff; 2648 last_pgoff = xas.xa_index; 2649 if (alloc_set_pte(vmf, NULL, page)) 2650 goto unlock; 2651 unlock_page(page); 2652 goto next; 2653 unlock: 2654 unlock_page(page); 2655 skip: 2656 put_page(page); 2657 next: 2658 /* Huge page is mapped? No need to proceed. */ 2659 if (pmd_trans_huge(*vmf->pmd)) 2660 break; 2661 } 2662 rcu_read_unlock(); 2663 } 2664 EXPORT_SYMBOL(filemap_map_pages); 2665 2666 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) 2667 { 2668 struct page *page = vmf->page; 2669 struct inode *inode = file_inode(vmf->vma->vm_file); 2670 vm_fault_t ret = VM_FAULT_LOCKED; 2671 2672 sb_start_pagefault(inode->i_sb); 2673 file_update_time(vmf->vma->vm_file); 2674 lock_page(page); 2675 if (page->mapping != inode->i_mapping) { 2676 unlock_page(page); 2677 ret = VM_FAULT_NOPAGE; 2678 goto out; 2679 } 2680 /* 2681 * We mark the page dirty already here so that when freeze is in 2682 * progress, we are guaranteed that writeback during freezing will 2683 * see the dirty page and writeprotect it again. 2684 */ 2685 set_page_dirty(page); 2686 wait_for_stable_page(page); 2687 out: 2688 sb_end_pagefault(inode->i_sb); 2689 return ret; 2690 } 2691 2692 const struct vm_operations_struct generic_file_vm_ops = { 2693 .fault = filemap_fault, 2694 .map_pages = filemap_map_pages, 2695 .page_mkwrite = filemap_page_mkwrite, 2696 }; 2697 2698 /* This is used for a general mmap of a disk file */ 2699 2700 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2701 { 2702 struct address_space *mapping = file->f_mapping; 2703 2704 if (!mapping->a_ops->readpage) 2705 return -ENOEXEC; 2706 file_accessed(file); 2707 vma->vm_ops = &generic_file_vm_ops; 2708 return 0; 2709 } 2710 2711 /* 2712 * This is for filesystems which do not implement ->writepage. 
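 *
 * Illustrative only ("example_ro_fops" is a hypothetical name): such a
 * filesystem points ->mmap at this helper so shared writable mappings are
 * refused with -EINVAL while private and read-only shared mappings keep
 * working:
 *
 *	const struct file_operations example_ro_fops = {
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_readonly_mmap,
 *	};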
2713 */ 2714 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 2715 { 2716 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 2717 return -EINVAL; 2718 return generic_file_mmap(file, vma); 2719 } 2720 #else 2721 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) 2722 { 2723 return VM_FAULT_SIGBUS; 2724 } 2725 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2726 { 2727 return -ENOSYS; 2728 } 2729 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) 2730 { 2731 return -ENOSYS; 2732 } 2733 #endif /* CONFIG_MMU */ 2734 2735 EXPORT_SYMBOL(filemap_page_mkwrite); 2736 EXPORT_SYMBOL(generic_file_mmap); 2737 EXPORT_SYMBOL(generic_file_readonly_mmap); 2738 2739 static struct page *wait_on_page_read(struct page *page) 2740 { 2741 if (!IS_ERR(page)) { 2742 wait_on_page_locked(page); 2743 if (!PageUptodate(page)) { 2744 put_page(page); 2745 page = ERR_PTR(-EIO); 2746 } 2747 } 2748 return page; 2749 } 2750 2751 static struct page *do_read_cache_page(struct address_space *mapping, 2752 pgoff_t index, 2753 int (*filler)(void *, struct page *), 2754 void *data, 2755 gfp_t gfp) 2756 { 2757 struct page *page; 2758 int err; 2759 repeat: 2760 page = find_get_page(mapping, index); 2761 if (!page) { 2762 page = __page_cache_alloc(gfp); 2763 if (!page) 2764 return ERR_PTR(-ENOMEM); 2765 err = add_to_page_cache_lru(page, mapping, index, gfp); 2766 if (unlikely(err)) { 2767 put_page(page); 2768 if (err == -EEXIST) 2769 goto repeat; 2770 /* Presumably ENOMEM for xarray node */ 2771 return ERR_PTR(err); 2772 } 2773 2774 filler: 2775 err = filler(data, page); 2776 if (err < 0) { 2777 put_page(page); 2778 return ERR_PTR(err); 2779 } 2780 2781 page = wait_on_page_read(page); 2782 if (IS_ERR(page)) 2783 return page; 2784 goto out; 2785 } 2786 if (PageUptodate(page)) 2787 goto out; 2788 2789 /* 2790 * Page is not up to date and may be locked due one of the following 2791 * case a: Page is being filled and the page lock is held 2792 * case b: Read/write error clearing the page uptodate status 2793 * case c: Truncation in progress (page locked) 2794 * case d: Reclaim in progress 2795 * 2796 * Case a, the page will be up to date when the page is unlocked. 2797 * There is no need to serialise on the page lock here as the page 2798 * is pinned so the lock gives no additional protection. Even if the 2799 * the page is truncated, the data is still valid if PageUptodate as 2800 * it's a race vs truncate race. 2801 * Case b, the page will not be up to date 2802 * Case c, the page may be truncated but in itself, the data may still 2803 * be valid after IO completes as it's a read vs truncate race. The 2804 * operation must restart if the page is not uptodate on unlock but 2805 * otherwise serialising on page lock to stabilise the mapping gives 2806 * no additional guarantees to the caller as the page lock is 2807 * released before return. 2808 * Case d, similar to truncation. If reclaim holds the page lock, it 2809 * will be a race with remove_mapping that determines if the mapping 2810 * is valid on unlock but otherwise the data is valid and there is 2811 * no need to serialise with page lock. 2812 * 2813 * As the page lock gives no additional guarantee, we optimistically 2814 * wait on the page to be unlocked and check if it's up to date and 2815 * use the page if it is. Otherwise, the page lock is required to 2816 * distinguish between the different cases. 
The motivation is that we 2817 * avoid spurious serialisations and wakeups when multiple processes 2818 * wait on the same page for IO to complete. 2819 */ 2820 wait_on_page_locked(page); 2821 if (PageUptodate(page)) 2822 goto out; 2823 2824 /* Distinguish between all the cases under the safety of the lock */ 2825 lock_page(page); 2826 2827 /* Case c or d, restart the operation */ 2828 if (!page->mapping) { 2829 unlock_page(page); 2830 put_page(page); 2831 goto repeat; 2832 } 2833 2834 /* Someone else locked and filled the page in a very small window */ 2835 if (PageUptodate(page)) { 2836 unlock_page(page); 2837 goto out; 2838 } 2839 goto filler; 2840 2841 out: 2842 mark_page_accessed(page); 2843 return page; 2844 } 2845 2846 /** 2847 * read_cache_page - read into page cache, fill it if needed 2848 * @mapping: the page's address_space 2849 * @index: the page index 2850 * @filler: function to perform the read 2851 * @data: first arg to filler(data, page) function, often left as NULL 2852 * 2853 * Read into the page cache. If a page already exists, and PageUptodate() is 2854 * not set, try to fill the page and wait for it to become unlocked. 2855 * 2856 * If the page does not get brought uptodate, return -EIO. 2857 * 2858 * Return: up to date page on success, ERR_PTR() on failure. 2859 */ 2860 struct page *read_cache_page(struct address_space *mapping, 2861 pgoff_t index, 2862 int (*filler)(void *, struct page *), 2863 void *data) 2864 { 2865 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); 2866 } 2867 EXPORT_SYMBOL(read_cache_page); 2868 2869 /** 2870 * read_cache_page_gfp - read into page cache, using specified page allocation flags. 2871 * @mapping: the page's address_space 2872 * @index: the page index 2873 * @gfp: the page allocator flags to use if allocating 2874 * 2875 * This is the same as "read_mapping_page(mapping, index, NULL)", but with 2876 * any new page allocations done using the specified allocation flags. 2877 * 2878 * If the page does not get brought uptodate, return -EIO. 2879 * 2880 * Return: up to date page on success, ERR_PTR() on failure. 2881 */ 2882 struct page *read_cache_page_gfp(struct address_space *mapping, 2883 pgoff_t index, 2884 gfp_t gfp) 2885 { 2886 filler_t *filler = (filler_t *)mapping->a_ops->readpage; 2887 2888 return do_read_cache_page(mapping, index, filler, NULL, gfp); 2889 } 2890 EXPORT_SYMBOL(read_cache_page_gfp); 2891 2892 /* 2893 * Don't operate on ranges the page cache doesn't support, and don't exceed the 2894 * LFS limits. If pos is under the limit it becomes a short access. If it 2895 * exceeds the limit we return -EFBIG. 
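 *
 * Hypothetical worked example, not from the original source: without
 * O_LARGEFILE, max_size is MAX_NON_LFS (2^31 - 1).  A 1 MiB request at
 * pos == MAX_NON_LFS - 4096 is shortened to a 4096 byte access because
 * *count is clamped to max_size - pos, while any access starting at
 * pos >= MAX_NON_LFS fails with -EFBIG.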
2896 */ 2897 static int generic_access_check_limits(struct file *file, loff_t pos, 2898 loff_t *count) 2899 { 2900 struct inode *inode = file->f_mapping->host; 2901 loff_t max_size = inode->i_sb->s_maxbytes; 2902 2903 if (!(file->f_flags & O_LARGEFILE)) 2904 max_size = MAX_NON_LFS; 2905 2906 if (unlikely(pos >= max_size)) 2907 return -EFBIG; 2908 *count = min(*count, max_size - pos); 2909 return 0; 2910 } 2911 2912 static int generic_write_check_limits(struct file *file, loff_t pos, 2913 loff_t *count) 2914 { 2915 loff_t limit = rlimit(RLIMIT_FSIZE); 2916 2917 if (limit != RLIM_INFINITY) { 2918 if (pos >= limit) { 2919 send_sig(SIGXFSZ, current, 0); 2920 return -EFBIG; 2921 } 2922 *count = min(*count, limit - pos); 2923 } 2924 2925 return generic_access_check_limits(file, pos, count); 2926 } 2927 2928 /* 2929 * Performs necessary checks before doing a write 2930 * 2931 * Can adjust writing position or amount of bytes to write. 2932 * Returns appropriate error code that caller should return or 2933 * zero in case that write should be allowed. 2934 */ 2935 inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) 2936 { 2937 struct file *file = iocb->ki_filp; 2938 struct inode *inode = file->f_mapping->host; 2939 loff_t count; 2940 int ret; 2941 2942 if (!iov_iter_count(from)) 2943 return 0; 2944 2945 /* FIXME: this is for backwards compatibility with 2.4 */ 2946 if (iocb->ki_flags & IOCB_APPEND) 2947 iocb->ki_pos = i_size_read(inode); 2948 2949 if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) 2950 return -EINVAL; 2951 2952 count = iov_iter_count(from); 2953 ret = generic_write_check_limits(file, iocb->ki_pos, &count); 2954 if (ret) 2955 return ret; 2956 2957 iov_iter_truncate(from, count); 2958 return iov_iter_count(from); 2959 } 2960 EXPORT_SYMBOL(generic_write_checks); 2961 2962 /* 2963 * Performs necessary checks before doing a clone. 2964 * 2965 * Can adjust amount of bytes to clone. 2966 * Returns appropriate error code that caller should return or 2967 * zero in case the clone should be allowed. 2968 */ 2969 int generic_remap_checks(struct file *file_in, loff_t pos_in, 2970 struct file *file_out, loff_t pos_out, 2971 loff_t *req_count, unsigned int remap_flags) 2972 { 2973 struct inode *inode_in = file_in->f_mapping->host; 2974 struct inode *inode_out = file_out->f_mapping->host; 2975 uint64_t count = *req_count; 2976 uint64_t bcount; 2977 loff_t size_in, size_out; 2978 loff_t bs = inode_out->i_sb->s_blocksize; 2979 int ret; 2980 2981 /* The start of both ranges must be aligned to an fs block. */ 2982 if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs)) 2983 return -EINVAL; 2984 2985 /* Ensure offsets don't wrap. */ 2986 if (pos_in + count < pos_in || pos_out + count < pos_out) 2987 return -EINVAL; 2988 2989 size_in = i_size_read(inode_in); 2990 size_out = i_size_read(inode_out); 2991 2992 /* Dedupe requires both ranges to be within EOF. */ 2993 if ((remap_flags & REMAP_FILE_DEDUP) && 2994 (pos_in >= size_in || pos_in + count > size_in || 2995 pos_out >= size_out || pos_out + count > size_out)) 2996 return -EINVAL; 2997 2998 /* Ensure the infile range is within the infile. 
*/ 2999 if (pos_in >= size_in) 3000 return -EINVAL; 3001 count = min(count, size_in - (uint64_t)pos_in); 3002 3003 ret = generic_access_check_limits(file_in, pos_in, &count); 3004 if (ret) 3005 return ret; 3006 3007 ret = generic_write_check_limits(file_out, pos_out, &count); 3008 if (ret) 3009 return ret; 3010 3011 /* 3012 * If the user wanted us to link to the infile's EOF, round up to the 3013 * next block boundary for this check. 3014 * 3015 * Otherwise, make sure the count is also block-aligned, having 3016 * already confirmed the starting offsets' block alignment. 3017 */ 3018 if (pos_in + count == size_in) { 3019 bcount = ALIGN(size_in, bs) - pos_in; 3020 } else { 3021 if (!IS_ALIGNED(count, bs)) 3022 count = ALIGN_DOWN(count, bs); 3023 bcount = count; 3024 } 3025 3026 /* Don't allow overlapped cloning within the same file. */ 3027 if (inode_in == inode_out && 3028 pos_out + bcount > pos_in && 3029 pos_out < pos_in + bcount) 3030 return -EINVAL; 3031 3032 /* 3033 * We shortened the request but the caller can't deal with that, so 3034 * bounce the request back to userspace. 3035 */ 3036 if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN)) 3037 return -EINVAL; 3038 3039 *req_count = count; 3040 return 0; 3041 } 3042 3043 int pagecache_write_begin(struct file *file, struct address_space *mapping, 3044 loff_t pos, unsigned len, unsigned flags, 3045 struct page **pagep, void **fsdata) 3046 { 3047 const struct address_space_operations *aops = mapping->a_ops; 3048 3049 return aops->write_begin(file, mapping, pos, len, flags, 3050 pagep, fsdata); 3051 } 3052 EXPORT_SYMBOL(pagecache_write_begin); 3053 3054 int pagecache_write_end(struct file *file, struct address_space *mapping, 3055 loff_t pos, unsigned len, unsigned copied, 3056 struct page *page, void *fsdata) 3057 { 3058 const struct address_space_operations *aops = mapping->a_ops; 3059 3060 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); 3061 } 3062 EXPORT_SYMBOL(pagecache_write_end); 3063 3064 ssize_t 3065 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) 3066 { 3067 struct file *file = iocb->ki_filp; 3068 struct address_space *mapping = file->f_mapping; 3069 struct inode *inode = mapping->host; 3070 loff_t pos = iocb->ki_pos; 3071 ssize_t written; 3072 size_t write_len; 3073 pgoff_t end; 3074 3075 write_len = iov_iter_count(from); 3076 end = (pos + write_len - 1) >> PAGE_SHIFT; 3077 3078 if (iocb->ki_flags & IOCB_NOWAIT) { 3079 /* If there are pages to writeback, return */ 3080 if (filemap_range_has_page(inode->i_mapping, pos, 3081 pos + write_len - 1)) 3082 return -EAGAIN; 3083 } else { 3084 written = filemap_write_and_wait_range(mapping, pos, 3085 pos + write_len - 1); 3086 if (written) 3087 goto out; 3088 } 3089 3090 /* 3091 * After a write we want buffered reads to be sure to go to disk to get 3092 * the new data. We invalidate clean cached page from the region we're 3093 * about to write. We do this *before* the write so that we can return 3094 * without clobbering -EIOCBQUEUED from ->direct_IO(). 3095 */ 3096 written = invalidate_inode_pages2_range(mapping, 3097 pos >> PAGE_SHIFT, end); 3098 /* 3099 * If a page can not be invalidated, return 0 to fall back 3100 * to buffered write. 
3101 */ 3102 if (written) { 3103 if (written == -EBUSY) 3104 return 0; 3105 goto out; 3106 } 3107 3108 written = mapping->a_ops->direct_IO(iocb, from); 3109 3110 /* 3111 * Finally, try again to invalidate clean pages which might have been 3112 * cached by non-direct readahead, or faulted in by get_user_pages() 3113 * if the source of the write was an mmap'ed region of the file 3114 * we're writing. Either one is a pretty crazy thing to do, 3115 * so we don't support it 100%. If this invalidation 3116 * fails, tough, the write still worked... 3117 * 3118 * Most of the time we do not need this since dio_complete() will do 3119 * the invalidation for us. However there are some file systems that 3120 * do not end up with dio_complete() being called, so let's not break 3121 * them by removing it completely 3122 */ 3123 if (mapping->nrpages) 3124 invalidate_inode_pages2_range(mapping, 3125 pos >> PAGE_SHIFT, end); 3126 3127 if (written > 0) { 3128 pos += written; 3129 write_len -= written; 3130 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { 3131 i_size_write(inode, pos); 3132 mark_inode_dirty(inode); 3133 } 3134 iocb->ki_pos = pos; 3135 } 3136 iov_iter_revert(from, write_len - iov_iter_count(from)); 3137 out: 3138 return written; 3139 } 3140 EXPORT_SYMBOL(generic_file_direct_write); 3141 3142 /* 3143 * Find or create a page at the given pagecache position. Return the locked 3144 * page. This function is specifically for buffered writes. 3145 */ 3146 struct page *grab_cache_page_write_begin(struct address_space *mapping, 3147 pgoff_t index, unsigned flags) 3148 { 3149 struct page *page; 3150 int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT; 3151 3152 if (flags & AOP_FLAG_NOFS) 3153 fgp_flags |= FGP_NOFS; 3154 3155 page = pagecache_get_page(mapping, index, fgp_flags, 3156 mapping_gfp_mask(mapping)); 3157 if (page) 3158 wait_for_stable_page(page); 3159 3160 return page; 3161 } 3162 EXPORT_SYMBOL(grab_cache_page_write_begin); 3163 3164 ssize_t generic_perform_write(struct file *file, 3165 struct iov_iter *i, loff_t pos) 3166 { 3167 struct address_space *mapping = file->f_mapping; 3168 const struct address_space_operations *a_ops = mapping->a_ops; 3169 long status = 0; 3170 ssize_t written = 0; 3171 unsigned int flags = 0; 3172 3173 do { 3174 struct page *page; 3175 unsigned long offset; /* Offset into pagecache page */ 3176 unsigned long bytes; /* Bytes to write to page */ 3177 size_t copied; /* Bytes copied from user */ 3178 void *fsdata; 3179 3180 offset = (pos & (PAGE_SIZE - 1)); 3181 bytes = min_t(unsigned long, PAGE_SIZE - offset, 3182 iov_iter_count(i)); 3183 3184 again: 3185 /* 3186 * Bring in the user page that we will copy from _first_. 3187 * Otherwise there's a nasty deadlock on copying from the 3188 * same page as we're writing to, without it being marked 3189 * up-to-date. 3190 * 3191 * Not only is this an optimisation, but it is also required 3192 * to check that the address is actually valid, when atomic 3193 * usercopies are used, below. 
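 *
 * A hypothetical user-space sequence that could trip this without the
 * pre-fault (illustrative only, not from the original source):
 *
 *	int fd = open("file", O_RDWR);
 *	char *buf = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *	write(fd, buf, 4096);
 *
 * Here the copy source and the page being written are the same page cache
 * page; faulting it in while it is locked and not yet up to date could
 * deadlock, which is why the source is faulted in up front.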
3194 */ 3195 if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 3196 status = -EFAULT; 3197 break; 3198 } 3199 3200 if (fatal_signal_pending(current)) { 3201 status = -EINTR; 3202 break; 3203 } 3204 3205 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 3206 &page, &fsdata); 3207 if (unlikely(status < 0)) 3208 break; 3209 3210 if (mapping_writably_mapped(mapping)) 3211 flush_dcache_page(page); 3212 3213 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 3214 flush_dcache_page(page); 3215 3216 status = a_ops->write_end(file, mapping, pos, bytes, copied, 3217 page, fsdata); 3218 if (unlikely(status < 0)) 3219 break; 3220 copied = status; 3221 3222 cond_resched(); 3223 3224 iov_iter_advance(i, copied); 3225 if (unlikely(copied == 0)) { 3226 /* 3227 * If we were unable to copy any data at all, we must 3228 * fall back to a single segment length write. 3229 * 3230 * If we didn't fallback here, we could livelock 3231 * because not all segments in the iov can be copied at 3232 * once without a pagefault. 3233 */ 3234 bytes = min_t(unsigned long, PAGE_SIZE - offset, 3235 iov_iter_single_seg_count(i)); 3236 goto again; 3237 } 3238 pos += copied; 3239 written += copied; 3240 3241 balance_dirty_pages_ratelimited(mapping); 3242 } while (iov_iter_count(i)); 3243 3244 return written ? written : status; 3245 } 3246 EXPORT_SYMBOL(generic_perform_write); 3247 3248 /** 3249 * __generic_file_write_iter - write data to a file 3250 * @iocb: IO state structure (file, offset, etc.) 3251 * @from: iov_iter with data to write 3252 * 3253 * This function does all the work needed for actually writing data to a 3254 * file. It does all basic checks, removes SUID from the file, updates 3255 * modification times and calls proper subroutines depending on whether we 3256 * do direct IO or a standard buffered write. 3257 * 3258 * It expects i_mutex to be grabbed unless we work on a block device or similar 3259 * object which does not need locking at all. 3260 * 3261 * This function does *not* take care of syncing data in case of O_SYNC write. 3262 * A caller has to handle it. This is mainly due to the fact that we want to 3263 * avoid syncing under i_mutex. 3264 * 3265 * Return: 3266 * * number of bytes written, even for truncated writes 3267 * * negative error code if no data has been written at all 3268 */ 3269 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 3270 { 3271 struct file *file = iocb->ki_filp; 3272 struct address_space * mapping = file->f_mapping; 3273 struct inode *inode = mapping->host; 3274 ssize_t written = 0; 3275 ssize_t err; 3276 ssize_t status; 3277 3278 /* We can write back this queue in page reclaim */ 3279 current->backing_dev_info = inode_to_bdi(inode); 3280 err = file_remove_privs(file); 3281 if (err) 3282 goto out; 3283 3284 err = file_update_time(file); 3285 if (err) 3286 goto out; 3287 3288 if (iocb->ki_flags & IOCB_DIRECT) { 3289 loff_t pos, endbyte; 3290 3291 written = generic_file_direct_write(iocb, from); 3292 /* 3293 * If the write stopped short of completing, fall back to 3294 * buffered writes. Some filesystems do this for writes to 3295 * holes, for example. For DAX files, a buffered write will 3296 * not succeed (even if it did, DAX does not handle dirty 3297 * page-cache pages correctly). 
3298 */ 3299 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) 3300 goto out; 3301 3302 status = generic_perform_write(file, from, pos = iocb->ki_pos); 3303 /* 3304 * If generic_perform_write() returned a synchronous error 3305 * then we want to return the number of bytes which were 3306 * direct-written, or the error code if that was zero. Note 3307 * that this differs from normal direct-io semantics, which 3308 * will return -EFOO even if some bytes were written. 3309 */ 3310 if (unlikely(status < 0)) { 3311 err = status; 3312 goto out; 3313 } 3314 /* 3315 * We need to ensure that the page cache pages are written to 3316 * disk and invalidated to preserve the expected O_DIRECT 3317 * semantics. 3318 */ 3319 endbyte = pos + status - 1; 3320 err = filemap_write_and_wait_range(mapping, pos, endbyte); 3321 if (err == 0) { 3322 iocb->ki_pos = endbyte + 1; 3323 written += status; 3324 invalidate_mapping_pages(mapping, 3325 pos >> PAGE_SHIFT, 3326 endbyte >> PAGE_SHIFT); 3327 } else { 3328 /* 3329 * We don't know how much we wrote, so just return 3330 * the number of bytes which were direct-written 3331 */ 3332 } 3333 } else { 3334 written = generic_perform_write(file, from, iocb->ki_pos); 3335 if (likely(written > 0)) 3336 iocb->ki_pos += written; 3337 } 3338 out: 3339 current->backing_dev_info = NULL; 3340 return written ? written : err; 3341 } 3342 EXPORT_SYMBOL(__generic_file_write_iter); 3343 3344 /** 3345 * generic_file_write_iter - write data to a file 3346 * @iocb: IO state structure 3347 * @from: iov_iter with data to write 3348 * 3349 * This is a wrapper around __generic_file_write_iter() to be used by most 3350 * filesystems. It takes care of syncing the file in case of O_SYNC file 3351 * and acquires i_mutex as needed. 3352 * Return: 3353 * * negative error code if no data has been written at all of 3354 * vfs_fsync_range() failed for a synchronous write 3355 * * number of bytes written, even for truncated writes 3356 */ 3357 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 3358 { 3359 struct file *file = iocb->ki_filp; 3360 struct inode *inode = file->f_mapping->host; 3361 ssize_t ret; 3362 3363 inode_lock(inode); 3364 ret = generic_write_checks(iocb, from); 3365 if (ret > 0) 3366 ret = __generic_file_write_iter(iocb, from); 3367 inode_unlock(inode); 3368 3369 if (ret > 0) 3370 ret = generic_write_sync(iocb, ret); 3371 return ret; 3372 } 3373 EXPORT_SYMBOL(generic_file_write_iter); 3374 3375 /** 3376 * try_to_release_page() - release old fs-specific metadata on a page 3377 * 3378 * @page: the page which the kernel is trying to free 3379 * @gfp_mask: memory allocation flags (and I/O mode) 3380 * 3381 * The address_space is to try to release any data against the page 3382 * (presumably at page->private). 3383 * 3384 * This may also be called if PG_fscache is set on a page, indicating that the 3385 * page is known to the local caching routines. 3386 * 3387 * The @gfp_mask argument specifies whether I/O may be performed to release 3388 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS). 3389 * 3390 * Return: %1 if the release was successful, otherwise return zero. 
3391 */ 3392 int try_to_release_page(struct page *page, gfp_t gfp_mask) 3393 { 3394 struct address_space * const mapping = page->mapping; 3395 3396 BUG_ON(!PageLocked(page)); 3397 if (PageWriteback(page)) 3398 return 0; 3399 3400 if (mapping && mapping->a_ops->releasepage) 3401 return mapping->a_ops->releasepage(page, gfp_mask); 3402 return try_to_free_buffers(page); 3403 } 3404 3405 EXPORT_SYMBOL(try_to_release_page); 3406