/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_mutex		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_mutex		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_mutex
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_mutex
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 *  ->i_mmap_mutex
 *    ->tasklist_lock		(memory_failure, collect_procs_ao)
 */

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked.  The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 *
	 * Fix it up by doing a final dirty accounting check after
	 * having removed the page entirely.
	 */
	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);

static int sleep_on_page(void *word)
{
	io_schedule();
	return 0;
}

static int sleep_on_page_killable(void *word)
{
	sleep_on_page(word);
	return fatal_signal_pending(current) ? -EINTR : 0;
}

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret2, ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned an error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But -EIO is a special case; it may indicate the worst
		 * thing (e.g. a bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);
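
/*
 * Example (editor's illustration, not part of the original file): the
 * two-phase "data integrity" pattern built from the helpers above --
 * start WB_SYNC_ALL writeback on a byte range with
 * filemap_fdatawrite_range() and then wait for it with
 * filemap_fdatawait_range().  The function name and its caller are
 * hypothetical; this is only a sketch.
 */
#if 0
static int example_sync_range(struct address_space *mapping,
			      loff_t start, loff_t end)
{
	int err;

	/* Queue writeback for every dirty page in [start, end] */
	err = filemap_fdatawrite_range(mapping, start, end);
	if (err)
		return err;

	/* Wait for the pages under writeback and collect any I/O error */
	return filemap_fdatawait_range(mapping, start, end);
}
#endif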

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		__delete_from_page_cache(old, NULL);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;
		__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
		/* mem_cgroup codes must not be called under tree_lock */
		mem_cgroup_replace_page_cache(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;
		if (shadowp)
			*shadowp = p;
		mapping->nrshadows--;
		if (node)
			workingset_node_shadows_dec(node);
	}
	radix_tree_replace_slot(slot, page);
	mapping->nrpages++;
	if (node) {
		workingset_node_pages_inc(node);
		/*
		 * Don't track a node that contains actual pages.
		 *
		 * Avoid acquiring the list_lru lock if already
		 * untracked.  The list_empty() test is safe as
		 * node->private_list is protected by
		 * mapping->tree_lock.
		 */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	}
	return 0;
}

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	error = mem_cgroup_charge_file(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		return error;

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		return error;
	}

	page_cache_get(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;
	__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__set_page_locked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__clear_page_locked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 */
		if (shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = alloc_pages_exact_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages.  By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     sleep_on_page_killable, TASK_KILLABLE);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_atomic();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
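
/*
 * Example (editor's illustration, not part of the original file): this is
 * roughly how the wait_on_page_locked()-style helpers in <linux/pagemap.h>
 * are built on top of wait_on_page_bit(); a paraphrased sketch, not the
 * authoritative definition.
 */
#if 0
static inline void example_wait_on_page_locked(struct page *page)
{
	/* Only sleep if the bit is actually set; unlock_page() wakes us */
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}
#endif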

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, int rw, int err)
{
	if (rw == READ) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else { /* rw == WRITE */
		if (err) {
			SetPageError(page);
			if (page->mapping)
				mapping_set_error(page->mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
					sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though we return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);

/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);
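
/*
 * Example (editor's illustration, not part of the original file): a
 * typical caller of find_get_entry() must handle all three documented
 * outcomes - NULL, an exceptional (shadow/swap) entry, or a real page
 * with an elevated refcount.  The function name is hypothetical.
 */
#if 0
static struct page *example_lookup_page_only(struct address_space *mapping,
					     pgoff_t index)
{
	struct page *page = find_get_entry(mapping, index);

	/* Shadow/swap entries carry no reference and are not real pages */
	if (radix_tree_exceptional_entry(page))
		return NULL;

	return page;	/* NULL or a page; caller must page_cache_release() it */
}
#endif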

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);

/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: FGP flags
 * @gfp_mask: gfp mask to use if a page is to be allocated
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * FGP flags modify how the page is returned.
 *
 * FGP_ACCESSED: the page will be marked accessed
 * FGP_LOCK: the page is returned locked
 * FGP_CREAT: If page is not present then a new page is allocated using
 *		@gfp_mask and added to the page cache and the VM's LRU
 *		list. The page is returned locked and with an increased
 *		refcount. Otherwise, %NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
	int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				page_cache_release(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			cache_gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS) {
			cache_gfp_mask &= ~__GFP_FS;
			radix_gfp_mask &= ~__GFP_FS;
		}

		page = __page_cache_alloc(cache_gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so avoid atomic mark_page_accessed later */
		if (fgp_flags & FGP_ACCESSED)
			init_page_accessed(page);

		err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);
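
/*
 * Example (editor's illustration, not part of the original file): a
 * find_or_create_page()-style lookup built on pagecache_get_page().  The
 * exact flag/gfp combination used by the real wrappers in
 * <linux/pagemap.h> may differ; this is only a sketch of how the FGP
 * flags compose.  The function name is hypothetical.
 */
#if 0
static struct page *example_find_or_create_page(struct address_space *mapping,
						pgoff_t index, gfp_t gfp)
{
	/*
	 * Return the page locked, mark it accessed, and allocate it with
	 * @gfp if it is not already present in the page cache.
	 */
	return pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				  gfp, gfp & GFP_RECLAIM_MASK);
}
#endif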

/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping.  The entries are placed at
 * @entries.  find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(iter.index);
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* The hole, there is no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.  We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a sizable window for page
			 * reclaim to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @iter:	data destination
 * @written:	already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
		struct iov_iter *iter, ssize_t written)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error = 0;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
							offset, iter->count))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */

		ret = copy_page_to_iter(page, offset, nr, iter);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		written += ret;
		if (!iov_iter_count(iter))
			goto out;
		if (ret < nr) {
			error = -EFAULT;
			goto out;
		}
		continue;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				error = 0;
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping,
						index, GFP_KERNEL);
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST) {
				error = 0;
				goto find_page;
			}
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
	return written ? written : error;
}
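
/*
 * Example (editor's illustration, not part of the original file): the
 * ->readpage() contract relied on above - the method is called with the
 * page locked and must unlock it once the read has completed or failed.
 * A trivial synchronous filesystem could satisfy it like this (similar in
 * spirit to simple_readpage() in fs/libfs.c); the function is hypothetical.
 */
#if 0
static int example_readpage(struct file *file, struct page *page)
{
	/* Fill the page; a trivial filesystem just zeroes it */
	zero_user(page, 0, PAGE_CACHE_SIZE);
	flush_dcache_page(page);

	SetPageUptodate(page);
	unlock_page(page);	/* wakes readers sleeping in lock_page_killable() */
	return 0;
}
#endif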

/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iter:	destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;
	loff_t pos = *ppos;

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (file->f_flags & O_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;
		size_t count = iov_iter_count(iter);
		loff_t size;

		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		retval = filemap_write_and_wait_range(mapping, pos,
					pos + count - 1);
		if (!retval) {
			struct iov_iter data = *iter;
			retval = mapping->a_ops->direct_IO(READ, iocb, &data, pos);
		}

		if (retval > 0) {
			*ppos = pos + retval;
			iov_iter_advance(iter, retval);
		}

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.
		 */
		if (retval < 0 || !iov_iter_count(iter) || *ppos >= size) {
			file_accessed(file);
			goto out;
		}
	}

	retval = do_generic_file_read(file, ppos, iter, retval);
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	unsigned long ra_pages;
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra_pages = max_sane_readahead(ra->ra_pages);
	ra->start = max_t(long, 0, offset - ra_pages / 2);
	ra->size = ra_pages;
	ra->async_size = ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further..
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}

/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	loff_t size;
	int ret = 0;

	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (offset >= size >> PAGE_CACHE_SHIFT)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON_PAGE(page->index != offset, page);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return VM_FAULT_SIGBUS to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct radix_tree_iter iter;
	void **slot;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	loff_t size;
	struct page *page;
	unsigned long address = (unsigned long) vmf->virtual_address;
	unsigned long addr;
	pte_t *pte;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
		if (iter.index > vmf->max_pgoff)
			break;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			goto next;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				break;
			else
				goto next;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
*/ 1992 if (unlikely(page != *slot)) { 1993 page_cache_release(page); 1994 goto repeat; 1995 } 1996 1997 if (!PageUptodate(page) || 1998 PageReadahead(page) || 1999 PageHWPoison(page)) 2000 goto skip; 2001 if (!trylock_page(page)) 2002 goto skip; 2003 2004 if (page->mapping != mapping || !PageUptodate(page)) 2005 goto unlock; 2006 2007 size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE); 2008 if (page->index >= size >> PAGE_CACHE_SHIFT) 2009 goto unlock; 2010 2011 pte = vmf->pte + page->index - vmf->pgoff; 2012 if (!pte_none(*pte)) 2013 goto unlock; 2014 2015 if (file->f_ra.mmap_miss > 0) 2016 file->f_ra.mmap_miss--; 2017 addr = address + (page->index - vmf->pgoff) * PAGE_SIZE; 2018 do_set_pte(vma, addr, page, pte, false, false); 2019 unlock_page(page); 2020 goto next; 2021 unlock: 2022 unlock_page(page); 2023 skip: 2024 page_cache_release(page); 2025 next: 2026 if (iter.index == vmf->max_pgoff) 2027 break; 2028 } 2029 rcu_read_unlock(); 2030 } 2031 EXPORT_SYMBOL(filemap_map_pages); 2032 2033 int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 2034 { 2035 struct page *page = vmf->page; 2036 struct inode *inode = file_inode(vma->vm_file); 2037 int ret = VM_FAULT_LOCKED; 2038 2039 sb_start_pagefault(inode->i_sb); 2040 file_update_time(vma->vm_file); 2041 lock_page(page); 2042 if (page->mapping != inode->i_mapping) { 2043 unlock_page(page); 2044 ret = VM_FAULT_NOPAGE; 2045 goto out; 2046 } 2047 /* 2048 * We mark the page dirty already here so that when freeze is in 2049 * progress, we are guaranteed that writeback during freezing will 2050 * see the dirty page and writeprotect it again. 2051 */ 2052 set_page_dirty(page); 2053 wait_for_stable_page(page); 2054 out: 2055 sb_end_pagefault(inode->i_sb); 2056 return ret; 2057 } 2058 EXPORT_SYMBOL(filemap_page_mkwrite); 2059 2060 const struct vm_operations_struct generic_file_vm_ops = { 2061 .fault = filemap_fault, 2062 .map_pages = filemap_map_pages, 2063 .page_mkwrite = filemap_page_mkwrite, 2064 .remap_pages = generic_file_remap_pages, 2065 }; 2066 2067 /* This is used for a general mmap of a disk file */ 2068 2069 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2070 { 2071 struct address_space *mapping = file->f_mapping; 2072 2073 if (!mapping->a_ops->readpage) 2074 return -ENOEXEC; 2075 file_accessed(file); 2076 vma->vm_ops = &generic_file_vm_ops; 2077 return 0; 2078 } 2079 2080 /* 2081 * This is for filesystems which do not implement ->writepage. 
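/*
 * Illustrative sketch (not part of this file): how a filesystem normally
 * reaches filemap_fault()/filemap_map_pages() above - by pointing its
 * ->mmap method at generic_file_mmap() in its file_operations.  "myfs" is
 * a hypothetical name; the helpers are the generic ones from this era of
 * the kernel (declared in <linux/fs.h>).  A filesystem that does not
 * implement ->writepage would use generic_file_readonly_mmap() instead,
 * as defined just below.
 */
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= new_sync_read,
	.read_iter	= generic_file_read_iter,
	.write		= new_sync_write,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,	/* installs generic_file_vm_ops */
	.fsync		= generic_file_fsync,
	.splice_read	= generic_file_splice_read,
};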
2082 */ 2083 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 2084 { 2085 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 2086 return -EINVAL; 2087 return generic_file_mmap(file, vma); 2088 } 2089 #else 2090 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2091 { 2092 return -ENOSYS; 2093 } 2094 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) 2095 { 2096 return -ENOSYS; 2097 } 2098 #endif /* CONFIG_MMU */ 2099 2100 EXPORT_SYMBOL(generic_file_mmap); 2101 EXPORT_SYMBOL(generic_file_readonly_mmap); 2102 2103 static struct page *wait_on_page_read(struct page *page) 2104 { 2105 if (!IS_ERR(page)) { 2106 wait_on_page_locked(page); 2107 if (!PageUptodate(page)) { 2108 page_cache_release(page); 2109 page = ERR_PTR(-EIO); 2110 } 2111 } 2112 return page; 2113 } 2114 2115 static struct page *__read_cache_page(struct address_space *mapping, 2116 pgoff_t index, 2117 int (*filler)(void *, struct page *), 2118 void *data, 2119 gfp_t gfp) 2120 { 2121 struct page *page; 2122 int err; 2123 repeat: 2124 page = find_get_page(mapping, index); 2125 if (!page) { 2126 page = __page_cache_alloc(gfp | __GFP_COLD); 2127 if (!page) 2128 return ERR_PTR(-ENOMEM); 2129 err = add_to_page_cache_lru(page, mapping, index, gfp); 2130 if (unlikely(err)) { 2131 page_cache_release(page); 2132 if (err == -EEXIST) 2133 goto repeat; 2134 /* Presumably ENOMEM for radix tree node */ 2135 return ERR_PTR(err); 2136 } 2137 err = filler(data, page); 2138 if (err < 0) { 2139 page_cache_release(page); 2140 page = ERR_PTR(err); 2141 } else { 2142 page = wait_on_page_read(page); 2143 } 2144 } 2145 return page; 2146 } 2147 2148 static struct page *do_read_cache_page(struct address_space *mapping, 2149 pgoff_t index, 2150 int (*filler)(void *, struct page *), 2151 void *data, 2152 gfp_t gfp) 2153 2154 { 2155 struct page *page; 2156 int err; 2157 2158 retry: 2159 page = __read_cache_page(mapping, index, filler, data, gfp); 2160 if (IS_ERR(page)) 2161 return page; 2162 if (PageUptodate(page)) 2163 goto out; 2164 2165 lock_page(page); 2166 if (!page->mapping) { 2167 unlock_page(page); 2168 page_cache_release(page); 2169 goto retry; 2170 } 2171 if (PageUptodate(page)) { 2172 unlock_page(page); 2173 goto out; 2174 } 2175 err = filler(data, page); 2176 if (err < 0) { 2177 page_cache_release(page); 2178 return ERR_PTR(err); 2179 } else { 2180 page = wait_on_page_read(page); 2181 if (IS_ERR(page)) 2182 return page; 2183 } 2184 out: 2185 mark_page_accessed(page); 2186 return page; 2187 } 2188 2189 /** 2190 * read_cache_page - read into page cache, fill it if needed 2191 * @mapping: the page's address_space 2192 * @index: the page index 2193 * @filler: function to perform the read 2194 * @data: first arg to filler(data, page) function, often left as NULL 2195 * 2196 * Read into the page cache. If a page already exists, and PageUptodate() is 2197 * not set, try to fill the page and wait for it to become unlocked. 2198 * 2199 * If the page does not get brought uptodate, return -EIO. 2200 */ 2201 struct page *read_cache_page(struct address_space *mapping, 2202 pgoff_t index, 2203 int (*filler)(void *, struct page *), 2204 void *data) 2205 { 2206 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); 2207 } 2208 EXPORT_SYMBOL(read_cache_page); 2209 2210 /** 2211 * read_cache_page_gfp - read into page cache, using specified page allocation flags. 
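/*
 * Illustrative sketch (not part of this file): pulling a page through the
 * cache with read_cache_page().  The filler mirrors what read_mapping_page()
 * does - it simply calls ->readpage - and "myfs_get_page" is a hypothetical
 * helper.  Passing a NULL file to ->readpage is only safe for filesystems
 * that do not look at it.
 */
static int myfs_filler(void *data, struct page *page)
{
	struct address_space *mapping = data;

	return mapping->a_ops->readpage(NULL, page);
}

static struct page *myfs_get_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	page = read_cache_page(mapping, index, myfs_filler, mapping);
	if (IS_ERR(page))
		return page;	/* -ENOMEM, or -EIO if the read failed */

	/* page is uptodate and referenced (but not locked) at this point */
	return page;		/* caller drops it with page_cache_release() */
}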
2212 * @mapping: the page's address_space 2213 * @index: the page index 2214 * @gfp: the page allocator flags to use if allocating 2215 * 2216 * This is the same as "read_mapping_page(mapping, index, NULL)", but with 2217 * any new page allocations done using the specified allocation flags. 2218 * 2219 * If the page does not get brought uptodate, return -EIO. 2220 */ 2221 struct page *read_cache_page_gfp(struct address_space *mapping, 2222 pgoff_t index, 2223 gfp_t gfp) 2224 { 2225 filler_t *filler = (filler_t *)mapping->a_ops->readpage; 2226 2227 return do_read_cache_page(mapping, index, filler, NULL, gfp); 2228 } 2229 EXPORT_SYMBOL(read_cache_page_gfp); 2230 2231 /* 2232 * Performs necessary checks before doing a write 2233 * 2234 * Can adjust writing position or amount of bytes to write. 2235 * Returns appropriate error code that caller should return or 2236 * zero in case that write should be allowed. 2237 */ 2238 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk) 2239 { 2240 struct inode *inode = file->f_mapping->host; 2241 unsigned long limit = rlimit(RLIMIT_FSIZE); 2242 2243 if (unlikely(*pos < 0)) 2244 return -EINVAL; 2245 2246 if (!isblk) { 2247 /* FIXME: this is for backwards compatibility with 2.4 */ 2248 if (file->f_flags & O_APPEND) 2249 *pos = i_size_read(inode); 2250 2251 if (limit != RLIM_INFINITY) { 2252 if (*pos >= limit) { 2253 send_sig(SIGXFSZ, current, 0); 2254 return -EFBIG; 2255 } 2256 if (*count > limit - (typeof(limit))*pos) { 2257 *count = limit - (typeof(limit))*pos; 2258 } 2259 } 2260 } 2261 2262 /* 2263 * LFS rule 2264 */ 2265 if (unlikely(*pos + *count > MAX_NON_LFS && 2266 !(file->f_flags & O_LARGEFILE))) { 2267 if (*pos >= MAX_NON_LFS) { 2268 return -EFBIG; 2269 } 2270 if (*count > MAX_NON_LFS - (unsigned long)*pos) { 2271 *count = MAX_NON_LFS - (unsigned long)*pos; 2272 } 2273 } 2274 2275 /* 2276 * Are we about to exceed the fs block limit ? 2277 * 2278 * If we have written data it becomes a short write. If we have 2279 * exceeded without writing data we send a signal and return EFBIG. 2280 * Linus frestrict idea will clean these up nicely.. 
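/*
 * Illustrative user-space sketch (not kernel code): the RLIMIT_FSIZE and
 * O_APPEND behaviour enforced by generic_write_checks() above.  With the
 * limit set to 4096 bytes, the first 8192-byte write is cut short at the
 * limit and the second one, starting at the limit, fails with EFBIG after
 * SIGXFSZ is delivered (ignored here so the error stays visible).  The
 * file name is arbitrary.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
	char buf[8192];
	ssize_t n;
	int fd;

	memset(buf, 'x', sizeof(buf));
	signal(SIGXFSZ, SIG_IGN);	/* otherwise the process is killed */
	setrlimit(RLIMIT_FSIZE, &rl);

	fd = open("demo.txt", O_WRONLY | O_CREAT | O_TRUNC | O_APPEND, 0644);
	if (fd < 0)
		return 1;

	/* O_APPEND: generic_write_checks() resets *pos to i_size first */
	n = write(fd, buf, sizeof(buf));
	printf("first write:  %zd\n", n);	/* 4096: truncated at the limit */

	n = write(fd, buf, sizeof(buf));
	printf("second write: %zd (%s)\n", n, n < 0 ? strerror(errno) : "ok");

	close(fd);
	return 0;
}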
2281 */ 2282 if (likely(!isblk)) { 2283 if (unlikely(*pos >= inode->i_sb->s_maxbytes)) { 2284 if (*count || *pos > inode->i_sb->s_maxbytes) { 2285 return -EFBIG; 2286 } 2287 /* zero-length writes at ->s_maxbytes are OK */ 2288 } 2289 2290 if (unlikely(*pos + *count > inode->i_sb->s_maxbytes)) 2291 *count = inode->i_sb->s_maxbytes - *pos; 2292 } else { 2293 #ifdef CONFIG_BLOCK 2294 loff_t isize; 2295 if (bdev_read_only(I_BDEV(inode))) 2296 return -EPERM; 2297 isize = i_size_read(inode); 2298 if (*pos >= isize) { 2299 if (*count || *pos > isize) 2300 return -ENOSPC; 2301 } 2302 2303 if (*pos + *count > isize) 2304 *count = isize - *pos; 2305 #else 2306 return -EPERM; 2307 #endif 2308 } 2309 return 0; 2310 } 2311 EXPORT_SYMBOL(generic_write_checks); 2312 2313 int pagecache_write_begin(struct file *file, struct address_space *mapping, 2314 loff_t pos, unsigned len, unsigned flags, 2315 struct page **pagep, void **fsdata) 2316 { 2317 const struct address_space_operations *aops = mapping->a_ops; 2318 2319 return aops->write_begin(file, mapping, pos, len, flags, 2320 pagep, fsdata); 2321 } 2322 EXPORT_SYMBOL(pagecache_write_begin); 2323 2324 int pagecache_write_end(struct file *file, struct address_space *mapping, 2325 loff_t pos, unsigned len, unsigned copied, 2326 struct page *page, void *fsdata) 2327 { 2328 const struct address_space_operations *aops = mapping->a_ops; 2329 2330 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); 2331 } 2332 EXPORT_SYMBOL(pagecache_write_end); 2333 2334 ssize_t 2335 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) 2336 { 2337 struct file *file = iocb->ki_filp; 2338 struct address_space *mapping = file->f_mapping; 2339 struct inode *inode = mapping->host; 2340 ssize_t written; 2341 size_t write_len; 2342 pgoff_t end; 2343 struct iov_iter data; 2344 2345 write_len = iov_iter_count(from); 2346 end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; 2347 2348 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); 2349 if (written) 2350 goto out; 2351 2352 /* 2353 * After a write we want buffered reads to be sure to go to disk to get 2354 * the new data. We invalidate clean cached page from the region we're 2355 * about to write. We do this *before* the write so that we can return 2356 * without clobbering -EIOCBQUEUED from ->direct_IO(). 2357 */ 2358 if (mapping->nrpages) { 2359 written = invalidate_inode_pages2_range(mapping, 2360 pos >> PAGE_CACHE_SHIFT, end); 2361 /* 2362 * If a page can not be invalidated, return 0 to fall back 2363 * to buffered write. 2364 */ 2365 if (written) { 2366 if (written == -EBUSY) 2367 return 0; 2368 goto out; 2369 } 2370 } 2371 2372 data = *from; 2373 written = mapping->a_ops->direct_IO(WRITE, iocb, &data, pos); 2374 2375 /* 2376 * Finally, try again to invalidate clean pages which might have been 2377 * cached by non-direct readahead, or faulted in by get_user_pages() 2378 * if the source of the write was an mmap'ed region of the file 2379 * we're writing. Either one is a pretty crazy thing to do, 2380 * so we don't support it 100%. If this invalidation 2381 * fails, tough, the write still worked... 
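/*
 * Illustrative user-space sketch (not kernel code): the O_DIRECT path served
 * by generic_file_direct_write() here.  Direct I/O needs the user buffer,
 * file offset and length aligned (typically to the logical block size; 4096
 * is used as a safe assumption), and it bypasses the page cache - which is
 * why the function writes back and invalidates the affected range so later
 * buffered readers still see the new data.
 */
#define _GNU_SOURCE		/* for O_DIRECT */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd;

	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 'd', 4096);

	fd = open("direct.dat", O_WRONLY | O_CREAT | O_TRUNC | O_DIRECT, 0644);
	if (fd < 0)
		return 1;	/* some filesystems (e.g. tmpfs) refuse O_DIRECT */

	/* 4096 bytes at offset 0: fully aligned, so ->direct_IO() handles it */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		perror("pwrite");

	close(fd);
	free(buf);
	return 0;
}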
2382 */ 2383 if (mapping->nrpages) { 2384 invalidate_inode_pages2_range(mapping, 2385 pos >> PAGE_CACHE_SHIFT, end); 2386 } 2387 2388 if (written > 0) { 2389 pos += written; 2390 iov_iter_advance(from, written); 2391 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { 2392 i_size_write(inode, pos); 2393 mark_inode_dirty(inode); 2394 } 2395 iocb->ki_pos = pos; 2396 } 2397 out: 2398 return written; 2399 } 2400 EXPORT_SYMBOL(generic_file_direct_write); 2401 2402 /* 2403 * Find or create a page at the given pagecache position. Return the locked 2404 * page. This function is specifically for buffered writes. 2405 */ 2406 struct page *grab_cache_page_write_begin(struct address_space *mapping, 2407 pgoff_t index, unsigned flags) 2408 { 2409 struct page *page; 2410 int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT; 2411 2412 if (flags & AOP_FLAG_NOFS) 2413 fgp_flags |= FGP_NOFS; 2414 2415 page = pagecache_get_page(mapping, index, fgp_flags, 2416 mapping_gfp_mask(mapping), 2417 GFP_KERNEL); 2418 if (page) 2419 wait_for_stable_page(page); 2420 2421 return page; 2422 } 2423 EXPORT_SYMBOL(grab_cache_page_write_begin); 2424 2425 ssize_t generic_perform_write(struct file *file, 2426 struct iov_iter *i, loff_t pos) 2427 { 2428 struct address_space *mapping = file->f_mapping; 2429 const struct address_space_operations *a_ops = mapping->a_ops; 2430 long status = 0; 2431 ssize_t written = 0; 2432 unsigned int flags = 0; 2433 2434 /* 2435 * Copies from kernel address space cannot fail (NFSD is a big user). 2436 */ 2437 if (segment_eq(get_fs(), KERNEL_DS)) 2438 flags |= AOP_FLAG_UNINTERRUPTIBLE; 2439 2440 do { 2441 struct page *page; 2442 unsigned long offset; /* Offset into pagecache page */ 2443 unsigned long bytes; /* Bytes to write to page */ 2444 size_t copied; /* Bytes copied from user */ 2445 void *fsdata; 2446 2447 offset = (pos & (PAGE_CACHE_SIZE - 1)); 2448 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2449 iov_iter_count(i)); 2450 2451 again: 2452 /* 2453 * Bring in the user page that we will copy from _first_. 2454 * Otherwise there's a nasty deadlock on copying from the 2455 * same page as we're writing to, without it being marked 2456 * up-to-date. 2457 * 2458 * Not only is this an optimisation, but it is also required 2459 * to check that the address is actually valid, when atomic 2460 * usercopies are used, below. 2461 */ 2462 if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 2463 status = -EFAULT; 2464 break; 2465 } 2466 2467 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2468 &page, &fsdata); 2469 if (unlikely(status < 0)) 2470 break; 2471 2472 if (mapping_writably_mapped(mapping)) 2473 flush_dcache_page(page); 2474 2475 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 2476 flush_dcache_page(page); 2477 2478 status = a_ops->write_end(file, mapping, pos, bytes, copied, 2479 page, fsdata); 2480 if (unlikely(status < 0)) 2481 break; 2482 copied = status; 2483 2484 cond_resched(); 2485 2486 iov_iter_advance(i, copied); 2487 if (unlikely(copied == 0)) { 2488 /* 2489 * If we were unable to copy any data at all, we must 2490 * fall back to a single segment length write. 2491 * 2492 * If we didn't fallback here, we could livelock 2493 * because not all segments in the iov can be copied at 2494 * once without a pagefault. 
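/*
 * Illustrative sketch (not part of this file): the ->write_begin/->write_end
 * pair that the loop in generic_perform_write() above drives.  A simple
 * in-memory filesystem can wire up the libfs helpers directly, much like
 * ramfs does; "myfs" is a hypothetical name.
 */
static const struct address_space_operations myfs_aops = {
	.readpage	= simple_readpage,
	.write_begin	= simple_write_begin,	/* find/lock the cache page */
	.write_end	= simple_write_end,	/* mark uptodate/dirty, grow i_size */
	.set_page_dirty	= __set_page_dirty_no_writeback,
};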
2495 */ 2496 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2497 iov_iter_single_seg_count(i)); 2498 goto again; 2499 } 2500 pos += copied; 2501 written += copied; 2502 2503 balance_dirty_pages_ratelimited(mapping); 2504 if (fatal_signal_pending(current)) { 2505 status = -EINTR; 2506 break; 2507 } 2508 } while (iov_iter_count(i)); 2509 2510 return written ? written : status; 2511 } 2512 EXPORT_SYMBOL(generic_perform_write); 2513 2514 /** 2515 * __generic_file_write_iter - write data to a file 2516 * @iocb: IO state structure (file, offset, etc.) 2517 * @from: iov_iter with data to write 2518 * 2519 * This function does all the work needed for actually writing data to a 2520 * file. It does all basic checks, removes SUID from the file, updates 2521 * modification times and calls proper subroutines depending on whether we 2522 * do direct IO or a standard buffered write. 2523 * 2524 * It expects i_mutex to be grabbed unless we work on a block device or similar 2525 * object which does not need locking at all. 2526 * 2527 * This function does *not* take care of syncing data in case of O_SYNC write. 2528 * A caller has to handle it. This is mainly due to the fact that we want to 2529 * avoid syncing under i_mutex. 2530 */ 2531 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2532 { 2533 struct file *file = iocb->ki_filp; 2534 struct address_space * mapping = file->f_mapping; 2535 struct inode *inode = mapping->host; 2536 loff_t pos = iocb->ki_pos; 2537 ssize_t written = 0; 2538 ssize_t err; 2539 ssize_t status; 2540 size_t count = iov_iter_count(from); 2541 2542 /* We can write back this queue in page reclaim */ 2543 current->backing_dev_info = mapping->backing_dev_info; 2544 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 2545 if (err) 2546 goto out; 2547 2548 if (count == 0) 2549 goto out; 2550 2551 iov_iter_truncate(from, count); 2552 2553 err = file_remove_suid(file); 2554 if (err) 2555 goto out; 2556 2557 err = file_update_time(file); 2558 if (err) 2559 goto out; 2560 2561 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ 2562 if (unlikely(file->f_flags & O_DIRECT)) { 2563 loff_t endbyte; 2564 2565 written = generic_file_direct_write(iocb, from, pos); 2566 if (written < 0 || written == count) 2567 goto out; 2568 2569 /* 2570 * direct-io write to a hole: fall through to buffered I/O 2571 * for completing the rest of the request. 2572 */ 2573 pos += written; 2574 count -= written; 2575 2576 status = generic_perform_write(file, from, pos); 2577 /* 2578 * If generic_perform_write() returned a synchronous error 2579 * then we want to return the number of bytes which were 2580 * direct-written, or the error code if that was zero. Note 2581 * that this differs from normal direct-io semantics, which 2582 * will return -EFOO even if some bytes were written. 2583 */ 2584 if (unlikely(status < 0) && !written) { 2585 err = status; 2586 goto out; 2587 } 2588 iocb->ki_pos = pos + status; 2589 /* 2590 * We need to ensure that the page cache pages are written to 2591 * disk and invalidated to preserve the expected O_DIRECT 2592 * semantics. 
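/*
 * Illustrative user-space sketch (not kernel code): the "source aliases the
 * destination" case discussed above, where the buffer handed to write() is a
 * shared mapping of the very page being written.  generic_perform_write()
 * prefaults the source page and then copies with page faults disabled while
 * holding the page lock, so this completes instead of deadlocking.  The data
 * is unchanged; only the code path matters here.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	char *map;
	int fd = open("alias.dat", O_RDWR | O_CREAT | O_TRUNC, 0644);

	if (fd < 0)
		return 1;
	if (ftruncate(fd, 4096))
		return 1;

	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;
	memset(map, 'a', 4096);

	/* source buffer and destination range share one page cache page */
	if (pwrite(fd, map, 4096, 0) != 4096)
		perror("pwrite");

	munmap(map, 4096);
	close(fd);
	return 0;
}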
2593 */ 2594 endbyte = pos + status - 1; 2595 err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte); 2596 if (err == 0) { 2597 written += status; 2598 invalidate_mapping_pages(mapping, 2599 pos >> PAGE_CACHE_SHIFT, 2600 endbyte >> PAGE_CACHE_SHIFT); 2601 } else { 2602 /* 2603 * We don't know how much we wrote, so just return 2604 * the number of bytes which were direct-written 2605 */ 2606 } 2607 } else { 2608 written = generic_perform_write(file, from, pos); 2609 if (likely(written >= 0)) 2610 iocb->ki_pos = pos + written; 2611 } 2612 out: 2613 current->backing_dev_info = NULL; 2614 return written ? written : err; 2615 } 2616 EXPORT_SYMBOL(__generic_file_write_iter); 2617 2618 /** 2619 * generic_file_write_iter - write data to a file 2620 * @iocb: IO state structure 2621 * @from: iov_iter with data to write 2622 * 2623 * This is a wrapper around __generic_file_write_iter() to be used by most 2624 * filesystems. It takes care of syncing the file in case of O_SYNC file 2625 * and acquires i_mutex as needed. 2626 */ 2627 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2628 { 2629 struct file *file = iocb->ki_filp; 2630 struct inode *inode = file->f_mapping->host; 2631 ssize_t ret; 2632 2633 mutex_lock(&inode->i_mutex); 2634 ret = __generic_file_write_iter(iocb, from); 2635 mutex_unlock(&inode->i_mutex); 2636 2637 if (ret > 0) { 2638 ssize_t err; 2639 2640 err = generic_write_sync(file, iocb->ki_pos - ret, ret); 2641 if (err < 0) 2642 ret = err; 2643 } 2644 return ret; 2645 } 2646 EXPORT_SYMBOL(generic_file_write_iter); 2647 2648 /** 2649 * try_to_release_page() - release old fs-specific metadata on a page 2650 * 2651 * @page: the page which the kernel is trying to free 2652 * @gfp_mask: memory allocation flags (and I/O mode) 2653 * 2654 * The address_space is to try to release any data against the page 2655 * (presumably at page->private). If the release was successful, return `1'. 2656 * Otherwise return zero. 2657 * 2658 * This may also be called if PG_fscache is set on a page, indicating that the 2659 * page is known to the local caching routines. 2660 * 2661 * The @gfp_mask argument specifies whether I/O may be performed to release 2662 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS). 2663 * 2664 */ 2665 int try_to_release_page(struct page *page, gfp_t gfp_mask) 2666 { 2667 struct address_space * const mapping = page->mapping; 2668 2669 BUG_ON(!PageLocked(page)); 2670 if (PageWriteback(page)) 2671 return 0; 2672 2673 if (mapping && mapping->a_ops->releasepage) 2674 return mapping->a_ops->releasepage(page, gfp_mask); 2675 return try_to_free_buffers(page); 2676 } 2677 2678 EXPORT_SYMBOL(try_to_release_page); 2679
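/*
 * Illustrative sketch (not part of this file): a ->releasepage()
 * implementation of the kind try_to_release_page() above would call, with
 * the page locked and not under writeback.  "struct myfs_extent" and its
 * fields are hypothetical; the shape follows the usual convention that a
 * filesystem setting PagePrivate stores its metadata in page->private and
 * holds an extra page reference while it is attached.  Returning 1 means
 * the private data was dropped and the page may be freed; 0 refuses.
 */
struct myfs_extent {			/* hypothetical per-page metadata */
	atomic_t refs;
};

static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct myfs_extent *ext;

	if (!page_has_private(page))
		return 1;		/* nothing attached, page can go */

	ext = (struct myfs_extent *)page_private(page);
	if (atomic_read(&ext->refs))
		return 0;		/* still in use, the VM must keep the page */

	ClearPagePrivate(page);
	set_page_private(page, 0);
	kfree(ext);
	page_cache_release(page);	/* drop the reference held for PagePrivate */
	return 1;
}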