/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock	(page_remove_rmap->mem_cgroup_begin_page_stat)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock		(memory_failure, collect_procs_ao)
 */

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrexceptional++;
		/*
		 * Make sure the nrexceptional update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked.  The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock and
 * mem_cgroup_begin_page_stat().
 */
void __delete_from_page_cache(struct page *page, void *shadow,
			      struct mem_cgroup *memcg)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	VM_BUG_ON_PAGE(page_mapped(page), page);

	/*
	 * At this point page must be either written or cleaned by truncate.
	 * Dirty page here signals a bug and loss of unwritten data.
	 *
	 * This fixes dirty accounting after removing the page entirely but
	 * leaves PageDirty set: it has no effect for truncated page and
	 * anyway will be cleared before returning page into buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, memcg,
				     inode_to_wb(mapping->host));
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct mem_cgroup *memcg;
	unsigned long flags;

	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;

	memcg = mem_cgroup_begin_page_stat(page);
	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL, memcg);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	mem_cgroup_end_page_stat(memcg);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.
 * Not suitable for data-integrity purposes - I/O may not be started against
 * all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

static int __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	return ret;
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	int ret, ret2;

	ret = __filemap_fdatawait_range(mapping, start_byte, end_byte);
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
void filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return;

	__filemap_fdatawait_range(mapping, 0, i_size - 1);
}

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Check error status of the address space
 * and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
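 *
 * Illustrative sketch only (not part of the original kerneldoc), assuming a
 * hypothetical caller that has already started writeback on the inode and
 * wants to propagate any I/O error:
 *
 *	err = filemap_fdatawait(inode->i_mapping);
 *	if (err)
 *		return err;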
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (dax_mapping(mapping) && mapping->nrexceptional) {
		err = dax_writeback_mapping_range(mapping, lstart, lend);
		if (err)
			return err;
	}

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
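 *
 * Illustrative sketch only (hypothetical caller, not from the original
 * comment), following the add-to-LRU requirement above; the LRU helper
 * named here is an assumption about the caller's context:
 *
 *	error = replace_page_cache_page(old, new, GFP_KERNEL);
 *	if (!error)
 *		lru_cache_add_file(new);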
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		struct mem_cgroup *memcg;
		unsigned long flags;

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		memcg = mem_cgroup_begin_page_stat(old);
		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(old, NULL, memcg);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_end_page_stat(memcg);
		mem_cgroup_replace_page(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;

		if (WARN_ON(dax_mapping(mapping)))
			return -EINVAL;

		if (shadowp)
			*shadowp = p;
		mapping->nrexceptional--;
		if (node)
			workingset_node_shadows_dec(node);
	}
	radix_tree_replace_slot(slot, page);
	mapping->nrpages++;
	if (node) {
		workingset_node_pages_inc(node);
		/*
		 * Don't track node that contains actual pages.
		 *
		 * Avoid acquiring the list_lru lock if already
		 * untracked.  The list_empty() test is safe as
		 * node->private_list is protected by
		 * mapping->tree_lock.
		 */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	}
	return 0;
}

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg, false);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg, false);
		return error;
	}

	page_cache_get(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg, false);
	page_cache_release(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__SetPageLocked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__ClearPageLocked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 */
		if (shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
EXPORT_SYMBOL(page_waitqueue);

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io, TASK_KILLABLE);
}

int wait_on_page_bit_killable_timeout(struct page *page,
				       int bit_nr, unsigned long timeout)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	wait.key.timeout = jiffies + timeout;
	if (!test_bit(bit_nr, &page->flags))
		return 0;
	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io_timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	page = compound_head(page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_atomic();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, int rw, int err)
{
	if (rw == READ) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else { /* rw == WRITE */
		if (err) {
			SetPageError(page);
			if (page->mapping)
				mapping_set_error(page->mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	struct page *page_head = compound_head(page);
	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	struct page *page_head = compound_head(page);
	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
					bit_wait_io, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);

/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);

/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: PCG flags
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * PCG flags modify how the page is returned.
 *
 * FGP_ACCESSED: the page will be marked accessed
 * FGP_LOCK: the page is returned locked
 * FGP_CREAT: If page is not present then a new page is allocated using
 *   @gfp_mask and added to the page cache and the VM's LRU
 *   list. The page is returned locked and with an increased
 *   refcount. Otherwise, %NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
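 *
 * Illustrative sketch only (not part of the original kerneldoc): a
 * find-or-create lookup of a locked, accessed page, as a hypothetical
 * caller might issue:
 *
 *	page = pagecache_get_page(mapping, index,
 *			FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
 *			mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;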
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
	int fgp_flags, gfp_t gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				page_cache_release(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp_mask &= ~__GFP_FS;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so avoid atomic mark_page_accessed later */
		if (fgp_flags & FGP_ACCESSED)
			__SetPageReferenced(page);

		err = add_to_page_cache_lru(page, mapping, offset,
				gfp_mask & GFP_RECLAIM_MASK);
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);

/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping.  The entries are placed at
 * @entries.  find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * A shadow entry of a recently evicted page, a swap
			 * entry from shmem/tmpfs or a DAX entry.  Return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(iter.index);
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
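 *
 * Illustrative sketch only (hypothetical caller, not from the original
 * comment): pin a run of pages and drop the references when done:
 *
 *	nr = find_get_pages_contig(mapping, index, PAGEVEC_SIZE, pages);
 *	for (i = 0; i < nr; i++) {
 *		... use pages[i] ...
 *		page_cache_release(pages[i]);
 *	}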
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* The hole, there is no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.  We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a sizable window for page
			 * reclaim to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/**
 * find_get_entries_tag - find and return entries that match @tag
 * @mapping:	the address_space to search
 * @start:	the starting page cache index
 * @tag:	the tag index
 * @nr_entries:	the maximum number of entries
 * @entries:	where the resulting entries are placed
 * @indices:	the cache indices corresponding to the entries in @entries
 *
 * Like find_get_entries, except we only return entries which are tagged with
 * @tag.
 */
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, start, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}

			/*
			 * A shadow entry of a recently evicted page, a swap
			 * entry from shmem/tmpfs or a DAX entry.  Return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_entries_tag);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @iter:	data destination
 * @written:	already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly.
 * But the goto's actually try to clarify some of the logic when it comes
 * to error handling etc.
 */
static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
		struct iov_iter *iter, ssize_t written)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error = 0;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
							offset, iter->count))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */

		ret = copy_page_to_iter(page, offset, nr, iter);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		written += ret;
		if (!iov_iter_count(iter))
			goto out;
		if (ret < nr) {
			error = -EFAULT;
			goto out;
		}
		continue;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				error = 0;
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping, index,
				mapping_gfp_constraint(mapping, GFP_KERNEL));
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST) {
				error = 0;
				goto find_page;
			}
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
	return written ? written : error;
}

/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iter:	destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
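 *
 * Illustrative sketch only (not part of the original kerneldoc): a simple
 * filesystem would typically point its file_operations at this routine, e.g.
 * a hypothetical:
 *
 *	const struct file_operations example_fops = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_mmap,
 *	};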
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;
	loff_t pos = *ppos;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;
		size_t count = iov_iter_count(iter);
		loff_t size;

		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		retval = filemap_write_and_wait_range(mapping, pos,
					pos + count - 1);
		if (!retval) {
			struct iov_iter data = *iter;
			retval = mapping->a_ops->direct_IO(iocb, &data, pos);
		}

		if (retval > 0) {
			*ppos = pos + retval;
			iov_iter_advance(iter, retval);
		}

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.  Buffered reads will not work for
		 * DAX files, so don't bother trying.
		 */
		if (retval < 0 || !iov_iter_count(iter) || *ppos >= size ||
		    IS_DAX(inode)) {
			file_accessed(file);
			goto out;
		}
	}

	retval = do_generic_file_read(file, ppos, iter, retval);
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = __page_cache_alloc(gfp_mask|__GFP_COLD);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = __page_cache_alloc(gfp_mask|__GFP_COLD);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
	ra->size = ra->ra_pages;
	ra->async_size = ra->ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further..
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}
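/*
 * For reference (not in the original file): the VM_RAND_READ and VM_SEQ_READ
 * flags tested above are normally set per mapping from userspace:
 *
 *	madvise(addr, len, MADV_RANDOM);	->  VM_RAND_READ, readahead is skipped
 *	madvise(addr, len, MADV_SEQUENTIAL);	->  VM_SEQ_READ, full-size readahead
 *
 * Without a hint, the per-open-file mmap_miss counter above adaptively turns
 * off read-around once a mapping keeps missing the page cache.
 */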
/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * vma->vm_mm->mmap_sem must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because
 * lock_page_or_retry() returned 0.
 * The mmap_sem has usually been released in this case.
 * See __lock_page_or_retry() for the exception.
 *
 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
 * has not been released.
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	loff_t size;
	int ret = 0;

	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (offset >= size >> PAGE_CACHE_SHIFT)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON_PAGE(page->index != offset, page);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset, vmf->gfp_mask);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return VM_FAULT_SIGBUS to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);
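/*
 * A minimal sketch (hypothetical, not in the original file): a filesystem that
 * must serialize faults against its own truncate or hole-punch path can still
 * reuse filemap_fault() by wrapping it in the filesystem lock.  The
 * EXAMPLEFS_I() accessor and its mmap_sem are assumptions for illustration:
 *
 *	static int examplefs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *		int ret;
 *
 *		down_read(&EXAMPLEFS_I(inode)->mmap_sem);
 *		ret = filemap_fault(vma, vmf);
 *		up_read(&EXAMPLEFS_I(inode)->mmap_sem);
 *		return ret;
 *	}
 */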
void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct radix_tree_iter iter;
	void **slot;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	loff_t size;
	struct page *page;
	unsigned long address = (unsigned long) vmf->virtual_address;
	unsigned long addr;
	pte_t *pte;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
		if (iter.index > vmf->max_pgoff)
			break;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			goto next;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				break;
			else
				goto next;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		if (!PageUptodate(page) ||
				PageReadahead(page) ||
				PageHWPoison(page))
			goto skip;
		if (!trylock_page(page))
			goto skip;

		if (page->mapping != mapping || !PageUptodate(page))
			goto unlock;

		size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
		if (page->index >= size >> PAGE_CACHE_SHIFT)
			goto unlock;

		pte = vmf->pte + page->index - vmf->pgoff;
		if (!pte_none(*pte))
			goto unlock;

		if (file->f_ra.mmap_miss > 0)
			file->f_ra.mmap_miss--;
		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
		do_set_pte(vma, addr, page, pte, false, false);
		unlock_page(page);
		goto next;
unlock:
		unlock_page(page);
skip:
		page_cache_release(page);
next:
		if (iter.index == vmf->max_pgoff)
			break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(filemap_map_pages);

int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}
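/*
 * A related sketch (hypothetical, not in the original file): a filesystem that
 * needs its own ->page_mkwrite (e.g. to allocate blocks before a shared page
 * becomes writable) but is otherwise happy with the generic behaviour installs
 * a private vm_operations_struct from its ->mmap hook and keeps reusing
 * filemap_fault() and filemap_map_pages():
 *
 *	static const struct vm_operations_struct examplefs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= examplefs_page_mkwrite,
 *	};
 *
 *	static int examplefs_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		file_accessed(file);
 *		vma->vm_ops = &examplefs_file_vm_ops;
 *		return 0;
 *	}
 *
 * examplefs_page_mkwrite() is assumed to end with the same set_page_dirty() /
 * wait_for_stable_page() sequence as filemap_page_mkwrite() above.
 */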
/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			page_cache_release(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		} else {
			page = wait_on_page_read(page);
		}
	}
	return page;
}

static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)

{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data, gfp);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	} else {
		page = wait_on_page_read(page);
		if (IS_ERR(page))
			return page;
	}
out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page and wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return do_read_cache_page(mapping, index, filler, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);
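/*
 * Usage sketch (not part of the original file): kernel code that just wants a
 * page of file data read in through the mapping's ->readpage normally uses the
 * read_mapping_page() wrapper from <linux/pagemap.h>, which supplies the
 * mapping's own readpage as the filler and NULL as @data:
 *
 *	struct page *page;
 *	void *kaddr;
 *
 *	page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	kaddr = kmap(page);
 *	... examine on-disk data at kaddr ...
 *	kunmap(page);
 *	page_cache_release(page);
 *
 * The page comes back uptodate and unlocked; the caller only holds a
 * reference, which it must drop when done.
 */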
/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns a negative error code that the caller should return, 0 when
 * there is nothing to write, or the number of bytes that may be written.
 */
inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);
	loff_t pos;

	if (!iov_iter_count(from))
		return 0;

	/* FIXME: this is for backwards compatibility with 2.4 */
	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	pos = iocb->ki_pos;

	if (limit != RLIM_INFINITY) {
		if (iocb->ki_pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		iov_iter_truncate(from, limit - (unsigned long)pos);
	}

	/*
	 * LFS rule
	 */
	if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (pos >= MAX_NON_LFS)
			return -EFBIG;
		iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus frestrict idea will clean these up nicely..
	 */
	if (unlikely(pos >= inode->i_sb->s_maxbytes))
		return -EFBIG;

	iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;
	size_t write_len;
	pgoff_t end;
	struct iov_iter data;

	write_len = iov_iter_count(from);
	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;

	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached page from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		/*
		 * If a page can not be invalidated, return 0 to fall back
		 * to buffered write.
		 */
		if (written) {
			if (written == -EBUSY)
				return 0;
			goto out;
		}
	}

	data = *from;
	written = mapping->a_ops->direct_IO(iocb, &data, pos);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT, end);
	}

	if (written > 0) {
		pos += written;
		iov_iter_advance(from, written);
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
2550 */ 2551 struct page *grab_cache_page_write_begin(struct address_space *mapping, 2552 pgoff_t index, unsigned flags) 2553 { 2554 struct page *page; 2555 int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT; 2556 2557 if (flags & AOP_FLAG_NOFS) 2558 fgp_flags |= FGP_NOFS; 2559 2560 page = pagecache_get_page(mapping, index, fgp_flags, 2561 mapping_gfp_mask(mapping)); 2562 if (page) 2563 wait_for_stable_page(page); 2564 2565 return page; 2566 } 2567 EXPORT_SYMBOL(grab_cache_page_write_begin); 2568 2569 ssize_t generic_perform_write(struct file *file, 2570 struct iov_iter *i, loff_t pos) 2571 { 2572 struct address_space *mapping = file->f_mapping; 2573 const struct address_space_operations *a_ops = mapping->a_ops; 2574 long status = 0; 2575 ssize_t written = 0; 2576 unsigned int flags = 0; 2577 2578 /* 2579 * Copies from kernel address space cannot fail (NFSD is a big user). 2580 */ 2581 if (!iter_is_iovec(i)) 2582 flags |= AOP_FLAG_UNINTERRUPTIBLE; 2583 2584 do { 2585 struct page *page; 2586 unsigned long offset; /* Offset into pagecache page */ 2587 unsigned long bytes; /* Bytes to write to page */ 2588 size_t copied; /* Bytes copied from user */ 2589 void *fsdata; 2590 2591 offset = (pos & (PAGE_CACHE_SIZE - 1)); 2592 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2593 iov_iter_count(i)); 2594 2595 again: 2596 /* 2597 * Bring in the user page that we will copy from _first_. 2598 * Otherwise there's a nasty deadlock on copying from the 2599 * same page as we're writing to, without it being marked 2600 * up-to-date. 2601 * 2602 * Not only is this an optimisation, but it is also required 2603 * to check that the address is actually valid, when atomic 2604 * usercopies are used, below. 2605 */ 2606 if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 2607 status = -EFAULT; 2608 break; 2609 } 2610 2611 if (fatal_signal_pending(current)) { 2612 status = -EINTR; 2613 break; 2614 } 2615 2616 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2617 &page, &fsdata); 2618 if (unlikely(status < 0)) 2619 break; 2620 2621 if (mapping_writably_mapped(mapping)) 2622 flush_dcache_page(page); 2623 2624 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 2625 flush_dcache_page(page); 2626 2627 status = a_ops->write_end(file, mapping, pos, bytes, copied, 2628 page, fsdata); 2629 if (unlikely(status < 0)) 2630 break; 2631 copied = status; 2632 2633 cond_resched(); 2634 2635 iov_iter_advance(i, copied); 2636 if (unlikely(copied == 0)) { 2637 /* 2638 * If we were unable to copy any data at all, we must 2639 * fall back to a single segment length write. 2640 * 2641 * If we didn't fallback here, we could livelock 2642 * because not all segments in the iov can be copied at 2643 * once without a pagefault. 2644 */ 2645 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2646 iov_iter_single_seg_count(i)); 2647 goto again; 2648 } 2649 pos += copied; 2650 written += copied; 2651 2652 balance_dirty_pages_ratelimited(mapping); 2653 } while (iov_iter_count(i)); 2654 2655 return written ? written : status; 2656 } 2657 EXPORT_SYMBOL(generic_perform_write); 2658 2659 /** 2660 * __generic_file_write_iter - write data to a file 2661 * @iocb: IO state structure (file, offset, etc.) 2662 * @from: iov_iter with data to write 2663 * 2664 * This function does all the work needed for actually writing data to a 2665 * file. 
/**
 * __generic_file_write_iter - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space * mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written = 0;
	ssize_t err;
	ssize_t status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from, iocb->ki_pos);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes.  Some filesystems do this for writes to
		 * holes, for example.  For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero.  Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);
/**
 * generic_file_write_iter - write data to a file
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC file
 * and acquires i_mutex as needed.
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private).  If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
 *
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
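/*
 * Usage sketch (hypothetical, not part of the original file): a filesystem
 * that keeps nothing but buffer_heads at page->private does not need its own
 * ->releasepage; try_to_release_page() falls back to try_to_free_buffers().
 * A filesystem with private per-page state supplies one that drops the state
 * only when that is safe:
 *
 *	static int examplefs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		if (!examplefs_can_drop_private(page))
 *			return 0;	// still busy: the page must stay
 *		examplefs_detach_private(page);
 *		return 1;		// metadata gone, page may be freed
 *	}
 *
 * When called through try_to_release_page(), the page is locked and not under
 * writeback; the examplefs_* helpers here are assumptions for illustration.
 */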