/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_mutex		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_mutex		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_mutex
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_mutex
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_mutex
 *   ->tasklist_lock		(memory_failure, collect_procs_ao)
 */

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked.  The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 *
	 * Fix it up by doing a final dirty accounting check after
	 * having removed the page entirely.
	 */
	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list; the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irq(&mapping->tree_lock);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret2, ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned an error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But -EIO is a special case; it may indicate that the
		 * worst thing (e.g. a bug) happened, so we avoid waiting
		 * for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		__delete_from_page_cache(old, NULL);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;
		__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_migrate(old, new, true);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;
		if (shadowp)
			*shadowp = p;
		mapping->nrshadows--;
		if (node)
			workingset_node_shadows_dec(node);
	}
	radix_tree_replace_slot(slot, page);
	mapping->nrpages++;
	if (node) {
		workingset_node_pages_inc(node);
		/*
		 * Don't track node that contains actual pages.
		 *
		 * Avoid acquiring the list_lru lock if already
		 * untracked.  The list_empty() test is safe as
		 * node->private_list is protected by
		 * mapping->tree_lock.
		 */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	}
	return 0;
}

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg);
		return error;
	}

	page_cache_get(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;
	__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg);
	page_cache_release(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__set_page_locked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__clear_page_locked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 */
		if (shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = alloc_pages_exact_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif
/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages.  By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
EXPORT_SYMBOL(page_waitqueue);

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io, TASK_KILLABLE);
}

int wait_on_page_bit_killable_timeout(struct page *page,
				      int bit_nr, unsigned long timeout)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	wait.key.timeout = jiffies + timeout;
	if (!test_bit(bit_nr, &page->flags))
		return 0;
	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io_timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_atomic();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, int rw, int err)
{
	if (rw == READ) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else { /* rw == WRITE */
		if (err) {
			SetPageError(page);
			if (page->mapping)
				mapping_set_error(page->mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
			   TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
				  bit_wait_io, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);

/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);
If there is a 1014 * page cache page, it is returned locked and with an increased 1015 * refcount. 1016 * 1017 * If the slot holds a shadow entry of a previously evicted page, or a 1018 * swap entry from shmem/tmpfs, it is returned. 1019 * 1020 * Otherwise, %NULL is returned. 1021 * 1022 * find_lock_entry() may sleep. 1023 */ 1024 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) 1025 { 1026 struct page *page; 1027 1028 repeat: 1029 page = find_get_entry(mapping, offset); 1030 if (page && !radix_tree_exception(page)) { 1031 lock_page(page); 1032 /* Has the page been truncated? */ 1033 if (unlikely(page->mapping != mapping)) { 1034 unlock_page(page); 1035 page_cache_release(page); 1036 goto repeat; 1037 } 1038 VM_BUG_ON_PAGE(page->index != offset, page); 1039 } 1040 return page; 1041 } 1042 EXPORT_SYMBOL(find_lock_entry); 1043 1044 /** 1045 * pagecache_get_page - find and get a page reference 1046 * @mapping: the address_space to search 1047 * @offset: the page index 1048 * @fgp_flags: PCG flags 1049 * @cache_gfp_mask: gfp mask to use for the page cache data page allocation 1050 * @radix_gfp_mask: gfp mask to use for radix tree node allocation 1051 * 1052 * Looks up the page cache slot at @mapping & @offset. 1053 * 1054 * PCG flags modify how the page is returned. 1055 * 1056 * FGP_ACCESSED: the page will be marked accessed 1057 * FGP_LOCK: Page is return locked 1058 * FGP_CREAT: If page is not present then a new page is allocated using 1059 * @cache_gfp_mask and added to the page cache and the VM's LRU 1060 * list. If radix tree nodes are allocated during page cache 1061 * insertion then @radix_gfp_mask is used. The page is returned 1062 * locked and with an increased refcount. Otherwise, %NULL is 1063 * returned. 1064 * 1065 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even 1066 * if the GFP flags specified for FGP_CREAT are atomic. 1067 * 1068 * If there is a page cache page, it is returned with an increased refcount. 1069 */ 1070 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, 1071 int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask) 1072 { 1073 struct page *page; 1074 1075 repeat: 1076 page = find_get_entry(mapping, offset); 1077 if (radix_tree_exceptional_entry(page)) 1078 page = NULL; 1079 if (!page) 1080 goto no_page; 1081 1082 if (fgp_flags & FGP_LOCK) { 1083 if (fgp_flags & FGP_NOWAIT) { 1084 if (!trylock_page(page)) { 1085 page_cache_release(page); 1086 return NULL; 1087 } 1088 } else { 1089 lock_page(page); 1090 } 1091 1092 /* Has the page been truncated? 
*/ 1093 if (unlikely(page->mapping != mapping)) { 1094 unlock_page(page); 1095 page_cache_release(page); 1096 goto repeat; 1097 } 1098 VM_BUG_ON_PAGE(page->index != offset, page); 1099 } 1100 1101 if (page && (fgp_flags & FGP_ACCESSED)) 1102 mark_page_accessed(page); 1103 1104 no_page: 1105 if (!page && (fgp_flags & FGP_CREAT)) { 1106 int err; 1107 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) 1108 cache_gfp_mask |= __GFP_WRITE; 1109 if (fgp_flags & FGP_NOFS) { 1110 cache_gfp_mask &= ~__GFP_FS; 1111 radix_gfp_mask &= ~__GFP_FS; 1112 } 1113 1114 page = __page_cache_alloc(cache_gfp_mask); 1115 if (!page) 1116 return NULL; 1117 1118 if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK))) 1119 fgp_flags |= FGP_LOCK; 1120 1121 /* Init accessed so avoid atomic mark_page_accessed later */ 1122 if (fgp_flags & FGP_ACCESSED) 1123 __SetPageReferenced(page); 1124 1125 err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask); 1126 if (unlikely(err)) { 1127 page_cache_release(page); 1128 page = NULL; 1129 if (err == -EEXIST) 1130 goto repeat; 1131 } 1132 } 1133 1134 return page; 1135 } 1136 EXPORT_SYMBOL(pagecache_get_page); 1137 1138 /** 1139 * find_get_entries - gang pagecache lookup 1140 * @mapping: The address_space to search 1141 * @start: The starting page cache index 1142 * @nr_entries: The maximum number of entries 1143 * @entries: Where the resulting entries are placed 1144 * @indices: The cache indices corresponding to the entries in @entries 1145 * 1146 * find_get_entries() will search for and return a group of up to 1147 * @nr_entries entries in the mapping. The entries are placed at 1148 * @entries. find_get_entries() takes a reference against any actual 1149 * pages it returns. 1150 * 1151 * The search returns a group of mapping-contiguous page cache entries 1152 * with ascending indexes. There may be holes in the indices due to 1153 * not-present pages. 1154 * 1155 * Any shadow entries of evicted pages, or swap entries from 1156 * shmem/tmpfs, are included in the returned array. 1157 * 1158 * find_get_entries() returns the number of pages and shadow entries 1159 * which were found. 1160 */ 1161 unsigned find_get_entries(struct address_space *mapping, 1162 pgoff_t start, unsigned int nr_entries, 1163 struct page **entries, pgoff_t *indices) 1164 { 1165 void **slot; 1166 unsigned int ret = 0; 1167 struct radix_tree_iter iter; 1168 1169 if (!nr_entries) 1170 return 0; 1171 1172 rcu_read_lock(); 1173 restart: 1174 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 1175 struct page *page; 1176 repeat: 1177 page = radix_tree_deref_slot(slot); 1178 if (unlikely(!page)) 1179 continue; 1180 if (radix_tree_exception(page)) { 1181 if (radix_tree_deref_retry(page)) 1182 goto restart; 1183 /* 1184 * A shadow entry of a recently evicted page, 1185 * or a swap entry from shmem/tmpfs. Return 1186 * it without attempting to raise page count. 1187 */ 1188 goto export; 1189 } 1190 if (!page_cache_get_speculative(page)) 1191 goto repeat; 1192 1193 /* Has the page moved? 
*/ 1194 if (unlikely(page != *slot)) { 1195 page_cache_release(page); 1196 goto repeat; 1197 } 1198 export: 1199 indices[ret] = iter.index; 1200 entries[ret] = page; 1201 if (++ret == nr_entries) 1202 break; 1203 } 1204 rcu_read_unlock(); 1205 return ret; 1206 } 1207 1208 /** 1209 * find_get_pages - gang pagecache lookup 1210 * @mapping: The address_space to search 1211 * @start: The starting page index 1212 * @nr_pages: The maximum number of pages 1213 * @pages: Where the resulting pages are placed 1214 * 1215 * find_get_pages() will search for and return a group of up to 1216 * @nr_pages pages in the mapping. The pages are placed at @pages. 1217 * find_get_pages() takes a reference against the returned pages. 1218 * 1219 * The search returns a group of mapping-contiguous pages with ascending 1220 * indexes. There may be holes in the indices due to not-present pages. 1221 * 1222 * find_get_pages() returns the number of pages which were found. 1223 */ 1224 unsigned find_get_pages(struct address_space *mapping, pgoff_t start, 1225 unsigned int nr_pages, struct page **pages) 1226 { 1227 struct radix_tree_iter iter; 1228 void **slot; 1229 unsigned ret = 0; 1230 1231 if (unlikely(!nr_pages)) 1232 return 0; 1233 1234 rcu_read_lock(); 1235 restart: 1236 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 1237 struct page *page; 1238 repeat: 1239 page = radix_tree_deref_slot(slot); 1240 if (unlikely(!page)) 1241 continue; 1242 1243 if (radix_tree_exception(page)) { 1244 if (radix_tree_deref_retry(page)) { 1245 /* 1246 * Transient condition which can only trigger 1247 * when entry at index 0 moves out of or back 1248 * to root: none yet gotten, safe to restart. 1249 */ 1250 WARN_ON(iter.index); 1251 goto restart; 1252 } 1253 /* 1254 * A shadow entry of a recently evicted page, 1255 * or a swap entry from shmem/tmpfs. Skip 1256 * over it. 1257 */ 1258 continue; 1259 } 1260 1261 if (!page_cache_get_speculative(page)) 1262 goto repeat; 1263 1264 /* Has the page moved? */ 1265 if (unlikely(page != *slot)) { 1266 page_cache_release(page); 1267 goto repeat; 1268 } 1269 1270 pages[ret] = page; 1271 if (++ret == nr_pages) 1272 break; 1273 } 1274 1275 rcu_read_unlock(); 1276 return ret; 1277 } 1278 1279 /** 1280 * find_get_pages_contig - gang contiguous pagecache lookup 1281 * @mapping: The address_space to search 1282 * @index: The starting page index 1283 * @nr_pages: The maximum number of pages 1284 * @pages: Where the resulting pages are placed 1285 * 1286 * find_get_pages_contig() works exactly like find_get_pages(), except 1287 * that the returned number of pages are guaranteed to be contiguous. 1288 * 1289 * find_get_pages_contig() returns the number of pages which were found. 1290 */ 1291 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, 1292 unsigned int nr_pages, struct page **pages) 1293 { 1294 struct radix_tree_iter iter; 1295 void **slot; 1296 unsigned int ret = 0; 1297 1298 if (unlikely(!nr_pages)) 1299 return 0; 1300 1301 rcu_read_lock(); 1302 restart: 1303 radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) { 1304 struct page *page; 1305 repeat: 1306 page = radix_tree_deref_slot(slot); 1307 /* The hole, there no reason to continue */ 1308 if (unlikely(!page)) 1309 break; 1310 1311 if (radix_tree_exception(page)) { 1312 if (radix_tree_deref_retry(page)) { 1313 /* 1314 * Transient condition which can only trigger 1315 * when entry at index 0 moves out of or back 1316 * to root: none yet gotten, safe to restart. 
1317 */ 1318 goto restart; 1319 } 1320 /* 1321 * A shadow entry of a recently evicted page, 1322 * or a swap entry from shmem/tmpfs. Stop 1323 * looking for contiguous pages. 1324 */ 1325 break; 1326 } 1327 1328 if (!page_cache_get_speculative(page)) 1329 goto repeat; 1330 1331 /* Has the page moved? */ 1332 if (unlikely(page != *slot)) { 1333 page_cache_release(page); 1334 goto repeat; 1335 } 1336 1337 /* 1338 * must check mapping and index after taking the ref. 1339 * otherwise we can get both false positives and false 1340 * negatives, which is just confusing to the caller. 1341 */ 1342 if (page->mapping == NULL || page->index != iter.index) { 1343 page_cache_release(page); 1344 break; 1345 } 1346 1347 pages[ret] = page; 1348 if (++ret == nr_pages) 1349 break; 1350 } 1351 rcu_read_unlock(); 1352 return ret; 1353 } 1354 EXPORT_SYMBOL(find_get_pages_contig); 1355 1356 /** 1357 * find_get_pages_tag - find and return pages that match @tag 1358 * @mapping: the address_space to search 1359 * @index: the starting page index 1360 * @tag: the tag index 1361 * @nr_pages: the maximum number of pages 1362 * @pages: where the resulting pages are placed 1363 * 1364 * Like find_get_pages, except we only return pages which are tagged with 1365 * @tag. We update @index to index the next page for the traversal. 1366 */ 1367 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, 1368 int tag, unsigned int nr_pages, struct page **pages) 1369 { 1370 struct radix_tree_iter iter; 1371 void **slot; 1372 unsigned ret = 0; 1373 1374 if (unlikely(!nr_pages)) 1375 return 0; 1376 1377 rcu_read_lock(); 1378 restart: 1379 radix_tree_for_each_tagged(slot, &mapping->page_tree, 1380 &iter, *index, tag) { 1381 struct page *page; 1382 repeat: 1383 page = radix_tree_deref_slot(slot); 1384 if (unlikely(!page)) 1385 continue; 1386 1387 if (radix_tree_exception(page)) { 1388 if (radix_tree_deref_retry(page)) { 1389 /* 1390 * Transient condition which can only trigger 1391 * when entry at index 0 moves out of or back 1392 * to root: none yet gotten, safe to restart. 1393 */ 1394 goto restart; 1395 } 1396 /* 1397 * A shadow entry of a recently evicted page. 1398 * 1399 * Those entries should never be tagged, but 1400 * this tree walk is lockless and the tags are 1401 * looked up in bulk, one radix tree node at a 1402 * time, so there is a sizable window for page 1403 * reclaim to evict a page we saw tagged. 1404 * 1405 * Skip over it. 1406 */ 1407 continue; 1408 } 1409 1410 if (!page_cache_get_speculative(page)) 1411 goto repeat; 1412 1413 /* Has the page moved? */ 1414 if (unlikely(page != *slot)) { 1415 page_cache_release(page); 1416 goto repeat; 1417 } 1418 1419 pages[ret] = page; 1420 if (++ret == nr_pages) 1421 break; 1422 } 1423 1424 rcu_read_unlock(); 1425 1426 if (ret) 1427 *index = pages[ret - 1]->index + 1; 1428 1429 return ret; 1430 } 1431 EXPORT_SYMBOL(find_get_pages_tag); 1432 1433 /* 1434 * CD/DVDs are error prone. When a medium error occurs, the driver may fail 1435 * a _large_ part of the i/o request. Imagine the worst scenario: 1436 * 1437 * ---R__________________________________________B__________ 1438 * ^ reading here ^ bad block(assume 4k) 1439 * 1440 * read(R) => miss => readahead(R...B) => media error => frustrating retries 1441 * => failing the whole request => read(R) => read(R+1) => 1442 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => 1443 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => 1444 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... 
1445 * 1446 * It is going insane. Fix it by quickly scaling down the readahead size. 1447 */ 1448 static void shrink_readahead_size_eio(struct file *filp, 1449 struct file_ra_state *ra) 1450 { 1451 ra->ra_pages /= 4; 1452 } 1453 1454 /** 1455 * do_generic_file_read - generic file read routine 1456 * @filp: the file to read 1457 * @ppos: current file position 1458 * @iter: data destination 1459 * @written: already copied 1460 * 1461 * This is a generic file read routine, and uses the 1462 * mapping->a_ops->readpage() function for the actual low-level stuff. 1463 * 1464 * This is really ugly. But the goto's actually try to clarify some 1465 * of the logic when it comes to error handling etc. 1466 */ 1467 static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos, 1468 struct iov_iter *iter, ssize_t written) 1469 { 1470 struct address_space *mapping = filp->f_mapping; 1471 struct inode *inode = mapping->host; 1472 struct file_ra_state *ra = &filp->f_ra; 1473 pgoff_t index; 1474 pgoff_t last_index; 1475 pgoff_t prev_index; 1476 unsigned long offset; /* offset into pagecache page */ 1477 unsigned int prev_offset; 1478 int error = 0; 1479 1480 index = *ppos >> PAGE_CACHE_SHIFT; 1481 prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT; 1482 prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1); 1483 last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; 1484 offset = *ppos & ~PAGE_CACHE_MASK; 1485 1486 for (;;) { 1487 struct page *page; 1488 pgoff_t end_index; 1489 loff_t isize; 1490 unsigned long nr, ret; 1491 1492 cond_resched(); 1493 find_page: 1494 page = find_get_page(mapping, index); 1495 if (!page) { 1496 page_cache_sync_readahead(mapping, 1497 ra, filp, 1498 index, last_index - index); 1499 page = find_get_page(mapping, index); 1500 if (unlikely(page == NULL)) 1501 goto no_cached_page; 1502 } 1503 if (PageReadahead(page)) { 1504 page_cache_async_readahead(mapping, 1505 ra, filp, page, 1506 index, last_index - index); 1507 } 1508 if (!PageUptodate(page)) { 1509 if (inode->i_blkbits == PAGE_CACHE_SHIFT || 1510 !mapping->a_ops->is_partially_uptodate) 1511 goto page_not_up_to_date; 1512 if (!trylock_page(page)) 1513 goto page_not_up_to_date; 1514 /* Did it get truncated before we got the lock? */ 1515 if (!page->mapping) 1516 goto page_not_up_to_date_locked; 1517 if (!mapping->a_ops->is_partially_uptodate(page, 1518 offset, iter->count)) 1519 goto page_not_up_to_date_locked; 1520 unlock_page(page); 1521 } 1522 page_ok: 1523 /* 1524 * i_size must be checked after we know the page is Uptodate. 1525 * 1526 * Checking i_size after the check allows us to calculate 1527 * the correct value for "nr", which means the zero-filled 1528 * part of the page is not copied back to userspace (unless 1529 * another truncate extends the file - this is desired though). 1530 */ 1531 1532 isize = i_size_read(inode); 1533 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 1534 if (unlikely(!isize || index > end_index)) { 1535 page_cache_release(page); 1536 goto out; 1537 } 1538 1539 /* nr is the maximum number of bytes to copy from this page */ 1540 nr = PAGE_CACHE_SIZE; 1541 if (index == end_index) { 1542 nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 1543 if (nr <= offset) { 1544 page_cache_release(page); 1545 goto out; 1546 } 1547 } 1548 nr = nr - offset; 1549 1550 /* If users can be writing to this page using arbitrary 1551 * virtual addresses, take care about potential aliasing 1552 * before reading the page on the kernel side. 
1553 */ 1554 if (mapping_writably_mapped(mapping)) 1555 flush_dcache_page(page); 1556 1557 /* 1558 * When a sequential read accesses a page several times, 1559 * only mark it as accessed the first time. 1560 */ 1561 if (prev_index != index || offset != prev_offset) 1562 mark_page_accessed(page); 1563 prev_index = index; 1564 1565 /* 1566 * Ok, we have the page, and it's up-to-date, so 1567 * now we can copy it to user space... 1568 */ 1569 1570 ret = copy_page_to_iter(page, offset, nr, iter); 1571 offset += ret; 1572 index += offset >> PAGE_CACHE_SHIFT; 1573 offset &= ~PAGE_CACHE_MASK; 1574 prev_offset = offset; 1575 1576 page_cache_release(page); 1577 written += ret; 1578 if (!iov_iter_count(iter)) 1579 goto out; 1580 if (ret < nr) { 1581 error = -EFAULT; 1582 goto out; 1583 } 1584 continue; 1585 1586 page_not_up_to_date: 1587 /* Get exclusive access to the page ... */ 1588 error = lock_page_killable(page); 1589 if (unlikely(error)) 1590 goto readpage_error; 1591 1592 page_not_up_to_date_locked: 1593 /* Did it get truncated before we got the lock? */ 1594 if (!page->mapping) { 1595 unlock_page(page); 1596 page_cache_release(page); 1597 continue; 1598 } 1599 1600 /* Did somebody else fill it already? */ 1601 if (PageUptodate(page)) { 1602 unlock_page(page); 1603 goto page_ok; 1604 } 1605 1606 readpage: 1607 /* 1608 * A previous I/O error may have been due to temporary 1609 * failures, eg. multipath errors. 1610 * PG_error will be set again if readpage fails. 1611 */ 1612 ClearPageError(page); 1613 /* Start the actual read. The read will unlock the page. */ 1614 error = mapping->a_ops->readpage(filp, page); 1615 1616 if (unlikely(error)) { 1617 if (error == AOP_TRUNCATED_PAGE) { 1618 page_cache_release(page); 1619 error = 0; 1620 goto find_page; 1621 } 1622 goto readpage_error; 1623 } 1624 1625 if (!PageUptodate(page)) { 1626 error = lock_page_killable(page); 1627 if (unlikely(error)) 1628 goto readpage_error; 1629 if (!PageUptodate(page)) { 1630 if (page->mapping == NULL) { 1631 /* 1632 * invalidate_mapping_pages got it 1633 */ 1634 unlock_page(page); 1635 page_cache_release(page); 1636 goto find_page; 1637 } 1638 unlock_page(page); 1639 shrink_readahead_size_eio(filp, ra); 1640 error = -EIO; 1641 goto readpage_error; 1642 } 1643 unlock_page(page); 1644 } 1645 1646 goto page_ok; 1647 1648 readpage_error: 1649 /* UHHUH! A synchronous read error occurred. Report it */ 1650 page_cache_release(page); 1651 goto out; 1652 1653 no_cached_page: 1654 /* 1655 * Ok, it wasn't cached, so we need to create a new 1656 * page.. 1657 */ 1658 page = page_cache_alloc_cold(mapping); 1659 if (!page) { 1660 error = -ENOMEM; 1661 goto out; 1662 } 1663 error = add_to_page_cache_lru(page, mapping, 1664 index, GFP_KERNEL); 1665 if (error) { 1666 page_cache_release(page); 1667 if (error == -EEXIST) { 1668 error = 0; 1669 goto find_page; 1670 } 1671 goto out; 1672 } 1673 goto readpage; 1674 } 1675 1676 out: 1677 ra->prev_pos = prev_index; 1678 ra->prev_pos <<= PAGE_CACHE_SHIFT; 1679 ra->prev_pos |= prev_offset; 1680 1681 *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset; 1682 file_accessed(filp); 1683 return written ? written : error; 1684 } 1685 1686 /** 1687 * generic_file_read_iter - generic filesystem read routine 1688 * @iocb: kernel I/O control block 1689 * @iter: destination for the data read 1690 * 1691 * This is the "read_iter()" routine for all filesystems 1692 * that can use the page cache directly. 
1693 */ 1694 ssize_t 1695 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 1696 { 1697 struct file *file = iocb->ki_filp; 1698 ssize_t retval = 0; 1699 loff_t *ppos = &iocb->ki_pos; 1700 loff_t pos = *ppos; 1701 1702 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ 1703 if (file->f_flags & O_DIRECT) { 1704 struct address_space *mapping = file->f_mapping; 1705 struct inode *inode = mapping->host; 1706 size_t count = iov_iter_count(iter); 1707 loff_t size; 1708 1709 if (!count) 1710 goto out; /* skip atime */ 1711 size = i_size_read(inode); 1712 retval = filemap_write_and_wait_range(mapping, pos, 1713 pos + count - 1); 1714 if (!retval) { 1715 struct iov_iter data = *iter; 1716 retval = mapping->a_ops->direct_IO(READ, iocb, &data, pos); 1717 } 1718 1719 if (retval > 0) { 1720 *ppos = pos + retval; 1721 iov_iter_advance(iter, retval); 1722 } 1723 1724 /* 1725 * Btrfs can have a short DIO read if we encounter 1726 * compressed extents, so if there was an error, or if 1727 * we've already read everything we wanted to, or if 1728 * there was a short read because we hit EOF, go ahead 1729 * and return. Otherwise fallthrough to buffered io for 1730 * the rest of the read. 1731 */ 1732 if (retval < 0 || !iov_iter_count(iter) || *ppos >= size) { 1733 file_accessed(file); 1734 goto out; 1735 } 1736 } 1737 1738 retval = do_generic_file_read(file, ppos, iter, retval); 1739 out: 1740 return retval; 1741 } 1742 EXPORT_SYMBOL(generic_file_read_iter); 1743 1744 #ifdef CONFIG_MMU 1745 /** 1746 * page_cache_read - adds requested page to the page cache if not already there 1747 * @file: file to read 1748 * @offset: page index 1749 * 1750 * This adds the requested page to the page cache if it isn't already there, 1751 * and schedules an I/O to read in its contents from disk. 1752 */ 1753 static int page_cache_read(struct file *file, pgoff_t offset) 1754 { 1755 struct address_space *mapping = file->f_mapping; 1756 struct page *page; 1757 int ret; 1758 1759 do { 1760 page = page_cache_alloc_cold(mapping); 1761 if (!page) 1762 return -ENOMEM; 1763 1764 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL); 1765 if (ret == 0) 1766 ret = mapping->a_ops->readpage(file, page); 1767 else if (ret == -EEXIST) 1768 ret = 0; /* losing race to add is OK */ 1769 1770 page_cache_release(page); 1771 1772 } while (ret == AOP_TRUNCATED_PAGE); 1773 1774 return ret; 1775 } 1776 1777 #define MMAP_LOTSAMISS (100) 1778 1779 /* 1780 * Synchronous readahead happens when we don't even find 1781 * a page in the page cache at all. 1782 */ 1783 static void do_sync_mmap_readahead(struct vm_area_struct *vma, 1784 struct file_ra_state *ra, 1785 struct file *file, 1786 pgoff_t offset) 1787 { 1788 unsigned long ra_pages; 1789 struct address_space *mapping = file->f_mapping; 1790 1791 /* If we don't want any read-ahead, don't bother */ 1792 if (vma->vm_flags & VM_RAND_READ) 1793 return; 1794 if (!ra->ra_pages) 1795 return; 1796 1797 if (vma->vm_flags & VM_SEQ_READ) { 1798 page_cache_sync_readahead(mapping, ra, file, offset, 1799 ra->ra_pages); 1800 return; 1801 } 1802 1803 /* Avoid banging the cache line if not needed */ 1804 if (ra->mmap_miss < MMAP_LOTSAMISS * 10) 1805 ra->mmap_miss++; 1806 1807 /* 1808 * Do we miss much more than hit in this file? If so, 1809 * stop bothering with read-ahead. It will only hurt. 
1810 */ 1811 if (ra->mmap_miss > MMAP_LOTSAMISS) 1812 return; 1813 1814 /* 1815 * mmap read-around 1816 */ 1817 ra_pages = max_sane_readahead(ra->ra_pages); 1818 ra->start = max_t(long, 0, offset - ra_pages / 2); 1819 ra->size = ra_pages; 1820 ra->async_size = ra_pages / 4; 1821 ra_submit(ra, mapping, file); 1822 } 1823 1824 /* 1825 * Asynchronous readahead happens when we find the page and PG_readahead, 1826 * so we want to possibly extend the readahead further.. 1827 */ 1828 static void do_async_mmap_readahead(struct vm_area_struct *vma, 1829 struct file_ra_state *ra, 1830 struct file *file, 1831 struct page *page, 1832 pgoff_t offset) 1833 { 1834 struct address_space *mapping = file->f_mapping; 1835 1836 /* If we don't want any read-ahead, don't bother */ 1837 if (vma->vm_flags & VM_RAND_READ) 1838 return; 1839 if (ra->mmap_miss > 0) 1840 ra->mmap_miss--; 1841 if (PageReadahead(page)) 1842 page_cache_async_readahead(mapping, ra, file, 1843 page, offset, ra->ra_pages); 1844 } 1845 1846 /** 1847 * filemap_fault - read in file data for page fault handling 1848 * @vma: vma in which the fault was taken 1849 * @vmf: struct vm_fault containing details of the fault 1850 * 1851 * filemap_fault() is invoked via the vma operations vector for a 1852 * mapped memory region to read in file data during a page fault. 1853 * 1854 * The goto's are kind of ugly, but this streamlines the normal case of having 1855 * it in the page cache, and handles the special cases reasonably without 1856 * having a lot of duplicated code. 1857 * 1858 * vma->vm_mm->mmap_sem must be held on entry. 1859 * 1860 * If our return value has VM_FAULT_RETRY set, it's because 1861 * lock_page_or_retry() returned 0. 1862 * The mmap_sem has usually been released in this case. 1863 * See __lock_page_or_retry() for the exception. 1864 * 1865 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem 1866 * has not been released. 1867 * 1868 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. 1869 */ 1870 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1871 { 1872 int error; 1873 struct file *file = vma->vm_file; 1874 struct address_space *mapping = file->f_mapping; 1875 struct file_ra_state *ra = &file->f_ra; 1876 struct inode *inode = mapping->host; 1877 pgoff_t offset = vmf->pgoff; 1878 struct page *page; 1879 loff_t size; 1880 int ret = 0; 1881 1882 size = round_up(i_size_read(inode), PAGE_CACHE_SIZE); 1883 if (offset >= size >> PAGE_CACHE_SHIFT) 1884 return VM_FAULT_SIGBUS; 1885 1886 /* 1887 * Do we have something in the page cache already? 1888 */ 1889 page = find_get_page(mapping, offset); 1890 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { 1891 /* 1892 * We found the page, so try async readahead before 1893 * waiting for the lock. 1894 */ 1895 do_async_mmap_readahead(vma, ra, file, page, offset); 1896 } else if (!page) { 1897 /* No page in the page cache at all */ 1898 do_sync_mmap_readahead(vma, ra, file, offset); 1899 count_vm_event(PGMAJFAULT); 1900 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); 1901 ret = VM_FAULT_MAJOR; 1902 retry_find: 1903 page = find_get_page(mapping, offset); 1904 if (!page) 1905 goto no_cached_page; 1906 } 1907 1908 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { 1909 page_cache_release(page); 1910 return ret | VM_FAULT_RETRY; 1911 } 1912 1913 /* Did it get truncated? 
*/ 1914 if (unlikely(page->mapping != mapping)) { 1915 unlock_page(page); 1916 put_page(page); 1917 goto retry_find; 1918 } 1919 VM_BUG_ON_PAGE(page->index != offset, page); 1920 1921 /* 1922 * We have a locked page in the page cache, now we need to check 1923 * that it's up-to-date. If not, it is going to be due to an error. 1924 */ 1925 if (unlikely(!PageUptodate(page))) 1926 goto page_not_uptodate; 1927 1928 /* 1929 * Found the page and have a reference on it. 1930 * We must recheck i_size under page lock. 1931 */ 1932 size = round_up(i_size_read(inode), PAGE_CACHE_SIZE); 1933 if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) { 1934 unlock_page(page); 1935 page_cache_release(page); 1936 return VM_FAULT_SIGBUS; 1937 } 1938 1939 vmf->page = page; 1940 return ret | VM_FAULT_LOCKED; 1941 1942 no_cached_page: 1943 /* 1944 * We're only likely to ever get here if MADV_RANDOM is in 1945 * effect. 1946 */ 1947 error = page_cache_read(file, offset); 1948 1949 /* 1950 * The page we want has now been added to the page cache. 1951 * In the unlikely event that someone removed it in the 1952 * meantime, we'll just come back here and read it again. 1953 */ 1954 if (error >= 0) 1955 goto retry_find; 1956 1957 /* 1958 * An error return from page_cache_read can result if the 1959 * system is low on memory, or a problem occurs while trying 1960 * to schedule I/O. 1961 */ 1962 if (error == -ENOMEM) 1963 return VM_FAULT_OOM; 1964 return VM_FAULT_SIGBUS; 1965 1966 page_not_uptodate: 1967 /* 1968 * Umm, take care of errors if the page isn't up-to-date. 1969 * Try to re-read it _once_. We do this synchronously, 1970 * because there really aren't any performance issues here 1971 * and we need to check for errors. 1972 */ 1973 ClearPageError(page); 1974 error = mapping->a_ops->readpage(file, page); 1975 if (!error) { 1976 wait_on_page_locked(page); 1977 if (!PageUptodate(page)) 1978 error = -EIO; 1979 } 1980 page_cache_release(page); 1981 1982 if (!error || error == AOP_TRUNCATED_PAGE) 1983 goto retry_find; 1984 1985 /* Things didn't work out. Return zero to tell the mm layer so. */ 1986 shrink_readahead_size_eio(file, ra); 1987 return VM_FAULT_SIGBUS; 1988 } 1989 EXPORT_SYMBOL(filemap_fault); 1990 1991 void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf) 1992 { 1993 struct radix_tree_iter iter; 1994 void **slot; 1995 struct file *file = vma->vm_file; 1996 struct address_space *mapping = file->f_mapping; 1997 loff_t size; 1998 struct page *page; 1999 unsigned long address = (unsigned long) vmf->virtual_address; 2000 unsigned long addr; 2001 pte_t *pte; 2002 2003 rcu_read_lock(); 2004 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) { 2005 if (iter.index > vmf->max_pgoff) 2006 break; 2007 repeat: 2008 page = radix_tree_deref_slot(slot); 2009 if (unlikely(!page)) 2010 goto next; 2011 if (radix_tree_exception(page)) { 2012 if (radix_tree_deref_retry(page)) 2013 break; 2014 else 2015 goto next; 2016 } 2017 2018 if (!page_cache_get_speculative(page)) 2019 goto repeat; 2020 2021 /* Has the page moved? 
*/ 2022 if (unlikely(page != *slot)) { 2023 page_cache_release(page); 2024 goto repeat; 2025 } 2026 2027 if (!PageUptodate(page) || 2028 PageReadahead(page) || 2029 PageHWPoison(page)) 2030 goto skip; 2031 if (!trylock_page(page)) 2032 goto skip; 2033 2034 if (page->mapping != mapping || !PageUptodate(page)) 2035 goto unlock; 2036 2037 size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE); 2038 if (page->index >= size >> PAGE_CACHE_SHIFT) 2039 goto unlock; 2040 2041 pte = vmf->pte + page->index - vmf->pgoff; 2042 if (!pte_none(*pte)) 2043 goto unlock; 2044 2045 if (file->f_ra.mmap_miss > 0) 2046 file->f_ra.mmap_miss--; 2047 addr = address + (page->index - vmf->pgoff) * PAGE_SIZE; 2048 do_set_pte(vma, addr, page, pte, false, false); 2049 unlock_page(page); 2050 goto next; 2051 unlock: 2052 unlock_page(page); 2053 skip: 2054 page_cache_release(page); 2055 next: 2056 if (iter.index == vmf->max_pgoff) 2057 break; 2058 } 2059 rcu_read_unlock(); 2060 } 2061 EXPORT_SYMBOL(filemap_map_pages); 2062 2063 int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 2064 { 2065 struct page *page = vmf->page; 2066 struct inode *inode = file_inode(vma->vm_file); 2067 int ret = VM_FAULT_LOCKED; 2068 2069 sb_start_pagefault(inode->i_sb); 2070 file_update_time(vma->vm_file); 2071 lock_page(page); 2072 if (page->mapping != inode->i_mapping) { 2073 unlock_page(page); 2074 ret = VM_FAULT_NOPAGE; 2075 goto out; 2076 } 2077 /* 2078 * We mark the page dirty already here so that when freeze is in 2079 * progress, we are guaranteed that writeback during freezing will 2080 * see the dirty page and writeprotect it again. 2081 */ 2082 set_page_dirty(page); 2083 wait_for_stable_page(page); 2084 out: 2085 sb_end_pagefault(inode->i_sb); 2086 return ret; 2087 } 2088 EXPORT_SYMBOL(filemap_page_mkwrite); 2089 2090 const struct vm_operations_struct generic_file_vm_ops = { 2091 .fault = filemap_fault, 2092 .map_pages = filemap_map_pages, 2093 .page_mkwrite = filemap_page_mkwrite, 2094 .remap_pages = generic_file_remap_pages, 2095 }; 2096 2097 /* This is used for a general mmap of a disk file */ 2098 2099 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2100 { 2101 struct address_space *mapping = file->f_mapping; 2102 2103 if (!mapping->a_ops->readpage) 2104 return -ENOEXEC; 2105 file_accessed(file); 2106 vma->vm_ops = &generic_file_vm_ops; 2107 return 0; 2108 } 2109 2110 /* 2111 * This is for filesystems which do not implement ->writepage. 
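 *
 * Illustration (editor's sketch, not from the original source): a filesystem
 * that cannot write cached pages back would typically wire this helper into
 * its file_operations; the "examplefs" names below are hypothetical:
 *
 *	const struct file_operations examplefs_ro_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_readonly_mmap,
 *	};
 *
 * Writable shared mappings are then refused with -EINVAL below, while
 * read-only and private mappings still go through generic_file_mmap() and
 * the generic_file_vm_ops fault handlers defined above.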
2112 */ 2113 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 2114 { 2115 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 2116 return -EINVAL; 2117 return generic_file_mmap(file, vma); 2118 } 2119 #else 2120 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2121 { 2122 return -ENOSYS; 2123 } 2124 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) 2125 { 2126 return -ENOSYS; 2127 } 2128 #endif /* CONFIG_MMU */ 2129 2130 EXPORT_SYMBOL(generic_file_mmap); 2131 EXPORT_SYMBOL(generic_file_readonly_mmap); 2132 2133 static struct page *wait_on_page_read(struct page *page) 2134 { 2135 if (!IS_ERR(page)) { 2136 wait_on_page_locked(page); 2137 if (!PageUptodate(page)) { 2138 page_cache_release(page); 2139 page = ERR_PTR(-EIO); 2140 } 2141 } 2142 return page; 2143 } 2144 2145 static struct page *__read_cache_page(struct address_space *mapping, 2146 pgoff_t index, 2147 int (*filler)(void *, struct page *), 2148 void *data, 2149 gfp_t gfp) 2150 { 2151 struct page *page; 2152 int err; 2153 repeat: 2154 page = find_get_page(mapping, index); 2155 if (!page) { 2156 page = __page_cache_alloc(gfp | __GFP_COLD); 2157 if (!page) 2158 return ERR_PTR(-ENOMEM); 2159 err = add_to_page_cache_lru(page, mapping, index, gfp); 2160 if (unlikely(err)) { 2161 page_cache_release(page); 2162 if (err == -EEXIST) 2163 goto repeat; 2164 /* Presumably ENOMEM for radix tree node */ 2165 return ERR_PTR(err); 2166 } 2167 err = filler(data, page); 2168 if (err < 0) { 2169 page_cache_release(page); 2170 page = ERR_PTR(err); 2171 } else { 2172 page = wait_on_page_read(page); 2173 } 2174 } 2175 return page; 2176 } 2177 2178 static struct page *do_read_cache_page(struct address_space *mapping, 2179 pgoff_t index, 2180 int (*filler)(void *, struct page *), 2181 void *data, 2182 gfp_t gfp) 2183 2184 { 2185 struct page *page; 2186 int err; 2187 2188 retry: 2189 page = __read_cache_page(mapping, index, filler, data, gfp); 2190 if (IS_ERR(page)) 2191 return page; 2192 if (PageUptodate(page)) 2193 goto out; 2194 2195 lock_page(page); 2196 if (!page->mapping) { 2197 unlock_page(page); 2198 page_cache_release(page); 2199 goto retry; 2200 } 2201 if (PageUptodate(page)) { 2202 unlock_page(page); 2203 goto out; 2204 } 2205 err = filler(data, page); 2206 if (err < 0) { 2207 page_cache_release(page); 2208 return ERR_PTR(err); 2209 } else { 2210 page = wait_on_page_read(page); 2211 if (IS_ERR(page)) 2212 return page; 2213 } 2214 out: 2215 mark_page_accessed(page); 2216 return page; 2217 } 2218 2219 /** 2220 * read_cache_page - read into page cache, fill it if needed 2221 * @mapping: the page's address_space 2222 * @index: the page index 2223 * @filler: function to perform the read 2224 * @data: first arg to filler(data, page) function, often left as NULL 2225 * 2226 * Read into the page cache. If a page already exists, and PageUptodate() is 2227 * not set, try to fill the page and wait for it to become unlocked. 2228 * 2229 * If the page does not get brought uptodate, return -EIO. 2230 */ 2231 struct page *read_cache_page(struct address_space *mapping, 2232 pgoff_t index, 2233 int (*filler)(void *, struct page *), 2234 void *data) 2235 { 2236 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); 2237 } 2238 EXPORT_SYMBOL(read_cache_page); 2239 2240 /** 2241 * read_cache_page_gfp - read into page cache, using specified page allocation flags. 
2242 * @mapping: the page's address_space 2243 * @index: the page index 2244 * @gfp: the page allocator flags to use if allocating 2245 * 2246 * This is the same as "read_mapping_page(mapping, index, NULL)", but with 2247 * any new page allocations done using the specified allocation flags. 2248 * 2249 * If the page does not get brought uptodate, return -EIO. 2250 */ 2251 struct page *read_cache_page_gfp(struct address_space *mapping, 2252 pgoff_t index, 2253 gfp_t gfp) 2254 { 2255 filler_t *filler = (filler_t *)mapping->a_ops->readpage; 2256 2257 return do_read_cache_page(mapping, index, filler, NULL, gfp); 2258 } 2259 EXPORT_SYMBOL(read_cache_page_gfp); 2260 2261 /* 2262 * Performs necessary checks before doing a write 2263 * 2264 * May adjust the write position or the number of bytes to write. 2265 * Returns the error code that the caller should return, or zero if 2266 * the write should be allowed. 2267 */ 2268 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk) 2269 { 2270 struct inode *inode = file->f_mapping->host; 2271 unsigned long limit = rlimit(RLIMIT_FSIZE); 2272 2273 if (unlikely(*pos < 0)) 2274 return -EINVAL; 2275 2276 if (!isblk) { 2277 /* FIXME: this is for backwards compatibility with 2.4 */ 2278 if (file->f_flags & O_APPEND) 2279 *pos = i_size_read(inode); 2280 2281 if (limit != RLIM_INFINITY) { 2282 if (*pos >= limit) { 2283 send_sig(SIGXFSZ, current, 0); 2284 return -EFBIG; 2285 } 2286 if (*count > limit - (typeof(limit))*pos) { 2287 *count = limit - (typeof(limit))*pos; 2288 } 2289 } 2290 } 2291 2292 /* 2293 * LFS rule 2294 */ 2295 if (unlikely(*pos + *count > MAX_NON_LFS && 2296 !(file->f_flags & O_LARGEFILE))) { 2297 if (*pos >= MAX_NON_LFS) { 2298 return -EFBIG; 2299 } 2300 if (*count > MAX_NON_LFS - (unsigned long)*pos) { 2301 *count = MAX_NON_LFS - (unsigned long)*pos; 2302 } 2303 } 2304 2305 /* 2306 * Are we about to exceed the fs block limit? 2307 * 2308 * If we have written data it becomes a short write. If we have 2309 * exceeded without writing data we send a signal and return EFBIG. 2310 * Linus' frestrict idea will clean these up nicely.
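 *
 * Editor's worked example (not from the original source), assuming
 * s_maxbytes == 1 TiB: a write of *count == 8192 at *pos == 1 TiB - 4096
 * is trimmed to *count == 4096; the same write at *pos == 1 TiB fails
 * with -EFBIG because *count is non-zero; and a zero-length write at
 * exactly *pos == 1 TiB is allowed through, as noted below.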
2311 */ 2312 if (likely(!isblk)) { 2313 if (unlikely(*pos >= inode->i_sb->s_maxbytes)) { 2314 if (*count || *pos > inode->i_sb->s_maxbytes) { 2315 return -EFBIG; 2316 } 2317 /* zero-length writes at ->s_maxbytes are OK */ 2318 } 2319 2320 if (unlikely(*pos + *count > inode->i_sb->s_maxbytes)) 2321 *count = inode->i_sb->s_maxbytes - *pos; 2322 } else { 2323 #ifdef CONFIG_BLOCK 2324 loff_t isize; 2325 if (bdev_read_only(I_BDEV(inode))) 2326 return -EPERM; 2327 isize = i_size_read(inode); 2328 if (*pos >= isize) { 2329 if (*count || *pos > isize) 2330 return -ENOSPC; 2331 } 2332 2333 if (*pos + *count > isize) 2334 *count = isize - *pos; 2335 #else 2336 return -EPERM; 2337 #endif 2338 } 2339 return 0; 2340 } 2341 EXPORT_SYMBOL(generic_write_checks); 2342 2343 int pagecache_write_begin(struct file *file, struct address_space *mapping, 2344 loff_t pos, unsigned len, unsigned flags, 2345 struct page **pagep, void **fsdata) 2346 { 2347 const struct address_space_operations *aops = mapping->a_ops; 2348 2349 return aops->write_begin(file, mapping, pos, len, flags, 2350 pagep, fsdata); 2351 } 2352 EXPORT_SYMBOL(pagecache_write_begin); 2353 2354 int pagecache_write_end(struct file *file, struct address_space *mapping, 2355 loff_t pos, unsigned len, unsigned copied, 2356 struct page *page, void *fsdata) 2357 { 2358 const struct address_space_operations *aops = mapping->a_ops; 2359 2360 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); 2361 } 2362 EXPORT_SYMBOL(pagecache_write_end); 2363 2364 ssize_t 2365 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) 2366 { 2367 struct file *file = iocb->ki_filp; 2368 struct address_space *mapping = file->f_mapping; 2369 struct inode *inode = mapping->host; 2370 ssize_t written; 2371 size_t write_len; 2372 pgoff_t end; 2373 struct iov_iter data; 2374 2375 write_len = iov_iter_count(from); 2376 end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; 2377 2378 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); 2379 if (written) 2380 goto out; 2381 2382 /* 2383 * After a write we want buffered reads to be sure to go to disk to get 2384 * the new data. We invalidate clean cached page from the region we're 2385 * about to write. We do this *before* the write so that we can return 2386 * without clobbering -EIOCBQUEUED from ->direct_IO(). 2387 */ 2388 if (mapping->nrpages) { 2389 written = invalidate_inode_pages2_range(mapping, 2390 pos >> PAGE_CACHE_SHIFT, end); 2391 /* 2392 * If a page can not be invalidated, return 0 to fall back 2393 * to buffered write. 2394 */ 2395 if (written) { 2396 if (written == -EBUSY) 2397 return 0; 2398 goto out; 2399 } 2400 } 2401 2402 data = *from; 2403 written = mapping->a_ops->direct_IO(WRITE, iocb, &data, pos); 2404 2405 /* 2406 * Finally, try again to invalidate clean pages which might have been 2407 * cached by non-direct readahead, or faulted in by get_user_pages() 2408 * if the source of the write was an mmap'ed region of the file 2409 * we're writing. Either one is a pretty crazy thing to do, 2410 * so we don't support it 100%. If this invalidation 2411 * fails, tough, the write still worked... 
2412 */ 2413 if (mapping->nrpages) { 2414 invalidate_inode_pages2_range(mapping, 2415 pos >> PAGE_CACHE_SHIFT, end); 2416 } 2417 2418 if (written > 0) { 2419 pos += written; 2420 iov_iter_advance(from, written); 2421 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { 2422 i_size_write(inode, pos); 2423 mark_inode_dirty(inode); 2424 } 2425 iocb->ki_pos = pos; 2426 } 2427 out: 2428 return written; 2429 } 2430 EXPORT_SYMBOL(generic_file_direct_write); 2431 2432 /* 2433 * Find or create a page at the given pagecache position. Return the locked 2434 * page. This function is specifically for buffered writes. 2435 */ 2436 struct page *grab_cache_page_write_begin(struct address_space *mapping, 2437 pgoff_t index, unsigned flags) 2438 { 2439 struct page *page; 2440 int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT; 2441 2442 if (flags & AOP_FLAG_NOFS) 2443 fgp_flags |= FGP_NOFS; 2444 2445 page = pagecache_get_page(mapping, index, fgp_flags, 2446 mapping_gfp_mask(mapping), 2447 GFP_KERNEL); 2448 if (page) 2449 wait_for_stable_page(page); 2450 2451 return page; 2452 } 2453 EXPORT_SYMBOL(grab_cache_page_write_begin); 2454 2455 ssize_t generic_perform_write(struct file *file, 2456 struct iov_iter *i, loff_t pos) 2457 { 2458 struct address_space *mapping = file->f_mapping; 2459 const struct address_space_operations *a_ops = mapping->a_ops; 2460 long status = 0; 2461 ssize_t written = 0; 2462 unsigned int flags = 0; 2463 2464 /* 2465 * Copies from kernel address space cannot fail (NFSD is a big user). 2466 */ 2467 if (segment_eq(get_fs(), KERNEL_DS)) 2468 flags |= AOP_FLAG_UNINTERRUPTIBLE; 2469 2470 do { 2471 struct page *page; 2472 unsigned long offset; /* Offset into pagecache page */ 2473 unsigned long bytes; /* Bytes to write to page */ 2474 size_t copied; /* Bytes copied from user */ 2475 void *fsdata; 2476 2477 offset = (pos & (PAGE_CACHE_SIZE - 1)); 2478 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2479 iov_iter_count(i)); 2480 2481 again: 2482 /* 2483 * Bring in the user page that we will copy from _first_. 2484 * Otherwise there's a nasty deadlock on copying from the 2485 * same page as we're writing to, without it being marked 2486 * up-to-date. 2487 * 2488 * Not only is this an optimisation, but it is also required 2489 * to check that the address is actually valid, when atomic 2490 * usercopies are used, below. 2491 */ 2492 if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 2493 status = -EFAULT; 2494 break; 2495 } 2496 2497 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2498 &page, &fsdata); 2499 if (unlikely(status < 0)) 2500 break; 2501 2502 if (mapping_writably_mapped(mapping)) 2503 flush_dcache_page(page); 2504 2505 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 2506 flush_dcache_page(page); 2507 2508 status = a_ops->write_end(file, mapping, pos, bytes, copied, 2509 page, fsdata); 2510 if (unlikely(status < 0)) 2511 break; 2512 copied = status; 2513 2514 cond_resched(); 2515 2516 iov_iter_advance(i, copied); 2517 if (unlikely(copied == 0)) { 2518 /* 2519 * If we were unable to copy any data at all, we must 2520 * fall back to a single segment length write. 2521 * 2522 * If we didn't fallback here, we could livelock 2523 * because not all segments in the iov can be copied at 2524 * once without a pagefault. 
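 *
 * Editor's illustration (not from the original source): with a two-segment
 * iovec where at most one of the user pages can be kept resident at a
 * time, retrying the full-size copy could fail indefinitely; restricting
 * bytes to the first segment lets each iteration fault that segment in,
 * copy it, and advance the iterator until the write completes.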
2525 */ 2526 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2527 iov_iter_single_seg_count(i)); 2528 goto again; 2529 } 2530 pos += copied; 2531 written += copied; 2532 2533 balance_dirty_pages_ratelimited(mapping); 2534 if (fatal_signal_pending(current)) { 2535 status = -EINTR; 2536 break; 2537 } 2538 } while (iov_iter_count(i)); 2539 2540 return written ? written : status; 2541 } 2542 EXPORT_SYMBOL(generic_perform_write); 2543 2544 /** 2545 * __generic_file_write_iter - write data to a file 2546 * @iocb: IO state structure (file, offset, etc.) 2547 * @from: iov_iter with data to write 2548 * 2549 * This function does all the work needed for actually writing data to a 2550 * file. It does all basic checks, removes SUID from the file, updates 2551 * modification times and calls proper subroutines depending on whether we 2552 * do direct IO or a standard buffered write. 2553 * 2554 * It expects i_mutex to be grabbed unless we work on a block device or similar 2555 * object which does not need locking at all. 2556 * 2557 * This function does *not* take care of syncing data in case of O_SYNC write. 2558 * A caller has to handle it. This is mainly due to the fact that we want to 2559 * avoid syncing under i_mutex. 2560 */ 2561 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2562 { 2563 struct file *file = iocb->ki_filp; 2564 struct address_space * mapping = file->f_mapping; 2565 struct inode *inode = mapping->host; 2566 loff_t pos = iocb->ki_pos; 2567 ssize_t written = 0; 2568 ssize_t err; 2569 ssize_t status; 2570 size_t count = iov_iter_count(from); 2571 2572 /* We can write back this queue in page reclaim */ 2573 current->backing_dev_info = mapping->backing_dev_info; 2574 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 2575 if (err) 2576 goto out; 2577 2578 if (count == 0) 2579 goto out; 2580 2581 iov_iter_truncate(from, count); 2582 2583 err = file_remove_suid(file); 2584 if (err) 2585 goto out; 2586 2587 err = file_update_time(file); 2588 if (err) 2589 goto out; 2590 2591 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ 2592 if (unlikely(file->f_flags & O_DIRECT)) { 2593 loff_t endbyte; 2594 2595 written = generic_file_direct_write(iocb, from, pos); 2596 if (written < 0 || written == count) 2597 goto out; 2598 2599 /* 2600 * direct-io write to a hole: fall through to buffered I/O 2601 * for completing the rest of the request. 2602 */ 2603 pos += written; 2604 count -= written; 2605 2606 status = generic_perform_write(file, from, pos); 2607 /* 2608 * If generic_perform_write() returned a synchronous error 2609 * then we want to return the number of bytes which were 2610 * direct-written, or the error code if that was zero. Note 2611 * that this differs from normal direct-io semantics, which 2612 * will return -EFOO even if some bytes were written. 2613 */ 2614 if (unlikely(status < 0)) { 2615 err = status; 2616 goto out; 2617 } 2618 iocb->ki_pos = pos + status; 2619 /* 2620 * We need to ensure that the page cache pages are written to 2621 * disk and invalidated to preserve the expected O_DIRECT 2622 * semantics. 
2623 */ 2624 endbyte = pos + status - 1; 2625 err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte); 2626 if (err == 0) { 2627 written += status; 2628 invalidate_mapping_pages(mapping, 2629 pos >> PAGE_CACHE_SHIFT, 2630 endbyte >> PAGE_CACHE_SHIFT); 2631 } else { 2632 /* 2633 * We don't know how much we wrote, so just return 2634 * the number of bytes which were direct-written 2635 */ 2636 } 2637 } else { 2638 written = generic_perform_write(file, from, pos); 2639 if (likely(written >= 0)) 2640 iocb->ki_pos = pos + written; 2641 } 2642 out: 2643 current->backing_dev_info = NULL; 2644 return written ? written : err; 2645 } 2646 EXPORT_SYMBOL(__generic_file_write_iter); 2647 2648 /** 2649 * generic_file_write_iter - write data to a file 2650 * @iocb: IO state structure 2651 * @from: iov_iter with data to write 2652 * 2653 * This is a wrapper around __generic_file_write_iter() to be used by most 2654 * filesystems. It takes care of syncing the file in case of O_SYNC file 2655 * and acquires i_mutex as needed. 2656 */ 2657 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2658 { 2659 struct file *file = iocb->ki_filp; 2660 struct inode *inode = file->f_mapping->host; 2661 ssize_t ret; 2662 2663 mutex_lock(&inode->i_mutex); 2664 ret = __generic_file_write_iter(iocb, from); 2665 mutex_unlock(&inode->i_mutex); 2666 2667 if (ret > 0) { 2668 ssize_t err; 2669 2670 err = generic_write_sync(file, iocb->ki_pos - ret, ret); 2671 if (err < 0) 2672 ret = err; 2673 } 2674 return ret; 2675 } 2676 EXPORT_SYMBOL(generic_file_write_iter); 2677 2678 /** 2679 * try_to_release_page() - release old fs-specific metadata on a page 2680 * 2681 * @page: the page which the kernel is trying to free 2682 * @gfp_mask: memory allocation flags (and I/O mode) 2683 * 2684 * The address_space is to try to release any data against the page 2685 * (presumably at page->private). If the release was successful, return `1'. 2686 * Otherwise return zero. 2687 * 2688 * This may also be called if PG_fscache is set on a page, indicating that the 2689 * page is known to the local caching routines. 2690 * 2691 * The @gfp_mask argument specifies whether I/O may be performed to release 2692 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS). 2693 * 2694 */ 2695 int try_to_release_page(struct page *page, gfp_t gfp_mask) 2696 { 2697 struct address_space * const mapping = page->mapping; 2698 2699 BUG_ON(!PageLocked(page)); 2700 if (PageWriteback(page)) 2701 return 0; 2702 2703 if (mapping && mapping->a_ops->releasepage) 2704 return mapping->a_ops->releasepage(page, gfp_mask); 2705 return try_to_free_buffers(page); 2706 } 2707 2708 EXPORT_SYMBOL(try_to_release_page); 2709
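/*
 * Editor's illustrative sketch, not part of the original file and not
 * compiled (hence the "#if 0"): a filesystem that wants its own ->fault
 * hook usually just delegates to filemap_fault() and reuses the generic
 * handlers, exactly as generic_file_vm_ops does above.  The mmap_sem and
 * VM_FAULT_RETRY rules documented at filemap_fault() apply unchanged to
 * such a wrapper.  The "examplefs" names are hypothetical.
 */
#if 0
/* Hypothetical example, not a kernel symbol */
static int examplefs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/*
	 * mmap_sem is held by the fault path that calls us; if the return
	 * value includes VM_FAULT_RETRY, lock_page_or_retry() has usually
	 * dropped it already (see __lock_page_or_retry()).
	 */
	return filemap_fault(vma, vmf);
}

static const struct vm_operations_struct examplefs_vm_ops = {
	.fault		= examplefs_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};
#endif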
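/*
 * Editor's illustrative sketch, not part of the original file and not
 * compiled: a typical caller of read_cache_page(), here via the
 * read_mapping_page() convenience wrapper from <linux/pagemap.h>, which
 * passes mapping->a_ops->readpage as the filler.  The
 * "example_read_index" helper is hypothetical.
 */
#if 0
static int example_read_index(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	char *kaddr;

	/* Returns an uptodate, referenced page or an ERR_PTR() value */
	page = read_mapping_page(mapping, index, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);	/* typically -EIO or -ENOMEM */

	kaddr = kmap(page);
	/* ... consume up to PAGE_CACHE_SIZE bytes at kaddr ... */
	kunmap(page);

	/* Drop the reference taken by read_cache_page() */
	page_cache_release(page);
	return 0;
}
#endif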
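/*
 * Editor's illustrative sketch, not part of the original file and not
 * compiled: most filesystems expose the write path above simply by
 * pointing ->write_iter at generic_file_write_iter(), which takes
 * i_mutex and performs the O_SYNC sync; only filesystems with their own
 * locking scheme call __generic_file_write_iter() plus
 * generic_write_sync() themselves.  The "examplefs" name is hypothetical.
 */
#if 0
const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
};
#endif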
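/*
 * Editor's illustrative sketch, not part of the original file and not
 * compiled: a ->releasepage implementation honouring the @gfp_mask
 * contract described at try_to_release_page() above.  The "examplefs"
 * name is hypothetical and the policy is deliberately simplistic.
 */
#if 0
static int examplefs_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* Nothing of ours attached: report success so the page can go */
	if (!page_has_private(page))
		return 1;

	/* Caller may not block or recurse into the fs: refuse for now */
	if (!(gfp_mask & __GFP_WAIT) || !(gfp_mask & __GFP_FS))
		return 0;

	/* Strip buffer_heads: returns 1 on success, 0 if still in use */
	return try_to_free_buffers(page);
}
#endif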