/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_mutex		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_mutex		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_mutex
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_mutex
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_mutex
 *   ->tasklist_lock		(memory_failure, collect_procs_ao)
 */

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked.  The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 *
	 * Fix it up by doing a final dirty accounting check after
	 * having removed the page entirely.
	 */
	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the
 * page cache and locked.  It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);
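
/*
 * Illustrative sketch (not part of the pagecache API itself): the protocol
 * a caller of delete_from_page_cache() is expected to follow.  The helper
 * name "example_drop_page" is hypothetical.
 */
#if 0
static void example_drop_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_lock_page(mapping, index);	/* ref + lock */

	if (!page)
		return;
	if (page->mapping == mapping)		/* not truncated meanwhile */
		delete_from_page_cache(page);	/* drops the pagecache ref */
	unlock_page(page);
	page_cache_release(page);		/* drop find_lock_page()'s ref */
}
#endif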

static int sleep_on_page(void *word)
{
	io_schedule();
	return 0;
}

static int sleep_on_page_killable(void *word)
{
	sleep_on_page(word);
	return fatal_signal_pending(current) ? -EINTR : 0;
}

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);
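
/*
 * Illustrative sketch of the two writeback modes described above.  The
 * wrapper below is hypothetical and only shows how the helpers are
 * typically combined.
 */
#if 0
static int example_flush_range(struct address_space *mapping,
			       loff_t start, loff_t end, bool data_integrity)
{
	if (!data_integrity)
		/* opportunistic cleansing: busy pages may be skipped */
		return __filemap_fdatawrite_range(mapping, start, end,
						  WB_SYNC_NONE);

	/*
	 * Data integrity: every dirty page in the range is written and
	 * then waited upon (compare filemap_write_and_wait_range()).
	 */
	return filemap_fdatawrite_range(mapping, start, end) ?:
	       filemap_fdatawait_range(mapping, start, end);
}
#endif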

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret2, ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned an error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But -EIO is a special case; it may indicate the worst
		 * thing (e.g. a bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
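
/*
 * Illustrative sketch: how a filesystem's ->fsync() typically builds on
 * filemap_write_and_wait_range().  "examplefs_fsync" and the metadata step
 * are hypothetical; real filesystems add their own journalling.
 */
#if 0
static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	/* write out dirty pagecache in the range and wait for it */
	err = filemap_write_and_wait_range(file->f_mapping, start, end);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	/* ... flush inode metadata / journal here ... */
	mutex_unlock(&inode->i_mutex);
	return 0;
}
#endif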

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		__delete_from_page_cache(old, NULL);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;
		__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
		/* mem_cgroup codes must not be called under tree_lock */
		mem_cgroup_replace_page_cache(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;
		if (shadowp)
			*shadowp = p;
		mapping->nrshadows--;
		if (node)
			workingset_node_shadows_dec(node);
	}
	radix_tree_replace_slot(slot, page);
	mapping->nrpages++;
	if (node) {
		workingset_node_pages_inc(node);
		/*
		 * Don't track node that contains actual pages.
		 *
		 * Avoid acquiring the list_lru lock if already
		 * untracked.  The list_empty() test is safe as
		 * node->private_list is protected by
		 * mapping->tree_lock.
		 */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	}
	return 0;
}

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	error = mem_cgroup_charge_file(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		return error;

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		return error;
	}

	page_cache_get(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;
	__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache.  It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__set_page_locked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__clear_page_locked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 */
		if (shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = alloc_pages_exact_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages.  By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     sleep_on_page_killable, TASK_KILLABLE);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_atomic();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
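
/*
 * Illustrative sketch of the usual lock_page()/unlock_page() discipline:
 * the page lock stabilizes page->mapping, so a caller that found the page
 * without the lock must re-check the mapping once it has locked the page.
 * "example_with_locked_page" is a hypothetical helper.
 */
#if 0
static int example_with_locked_page(struct address_space *mapping,
				    pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);
	int ret = -ENOENT;

	if (!page)
		return ret;
	lock_page(page);
	if (page->mapping == mapping) {		/* not truncated meanwhile */
		/* ... operate on the locked, pinned page here ... */
		ret = 0;
	}
	unlock_page(page);
	page_cache_release(page);
	return ret;
}
#endif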

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, int rw, int err)
{
	if (rw == READ) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else { /* rw == WRITE */
		if (err) {
			SetPageError(page);
			if (page->mapping)
				mapping_set_error(page->mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
					sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though we return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true).  In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock.  However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time.  For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true).  In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock.  However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time.  For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);

/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);
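
/*
 * Illustrative sketch: callers of find_get_entry() must be prepared for
 * exceptional entries (shadow entries or shmem/tmpfs swap entries) as well
 * as real pages; this is roughly what the find_get_page() wrapper provides.
 * "example_lookup_page_only" is a hypothetical helper.
 */
#if 0
static struct page *example_lookup_page_only(struct address_space *mapping,
					     pgoff_t offset)
{
	struct page *page = find_get_entry(mapping, offset);

	if (radix_tree_exceptional_entry(page))
		return NULL;	/* shadow/swap entry: no struct page, no ref */
	return page;		/* NULL or a page with an elevated refcount */
}
#endif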

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);

/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: FGP flags
 * @cache_gfp_mask: gfp mask to use if a page cache page is to be allocated
 * @radix_gfp_mask: gfp mask to use when inserting the page into the page cache
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * FGP flags modify how the page is returned
 *
 * FGP_ACCESSED: the page will be marked accessed
 * FGP_LOCK: the page is returned locked
 * FGP_CREAT: if the page is not present then a new page is allocated using
 *   @cache_gfp_mask and added to the page cache and the VM's LRU
 *   list. The page is returned locked and with an increased
 *   refcount. Otherwise, %NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
	int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				page_cache_release(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			cache_gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS) {
			cache_gfp_mask &= ~__GFP_FS;
			radix_gfp_mask &= ~__GFP_FS;
		}

		page = __page_cache_alloc(cache_gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so we can avoid the atomic mark_page_accessed() later */
		if (fgp_flags & FGP_ACCESSED)
			init_page_accessed(page);

		err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);

/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping.  The entries are placed at
 * @entries.  find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(iter.index);
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* The hole, there is no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.   We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a sizable window for page
			 * reclaim to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);
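
/*
 * Illustrative sketch: walking every page tagged dirty with
 * find_get_pages_tag(), the way the pagevec-based helpers (e.g.
 * pagevec_lookup_tag()) are used elsewhere in this file.  The function
 * name and batch size are hypothetical.
 */
#if 0
static void example_scan_dirty_pages(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned nr, i;

	while ((nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
					16, pages)) != 0) {
		for (i = 0; i < nr; i++) {
			/* ... inspect pages[i] here ... */
			page_cache_release(pages[i]);	/* drop the gang ref */
		}
		cond_resched();
	}
}
#endif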

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @iter:	data destination
 * @written:	already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
		struct iov_iter *iter, ssize_t written)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error = 0;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
							offset, iter->count))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
1535 */ 1536 if (mapping_writably_mapped(mapping)) 1537 flush_dcache_page(page); 1538 1539 /* 1540 * When a sequential read accesses a page several times, 1541 * only mark it as accessed the first time. 1542 */ 1543 if (prev_index != index || offset != prev_offset) 1544 mark_page_accessed(page); 1545 prev_index = index; 1546 1547 /* 1548 * Ok, we have the page, and it's up-to-date, so 1549 * now we can copy it to user space... 1550 */ 1551 1552 ret = copy_page_to_iter(page, offset, nr, iter); 1553 offset += ret; 1554 index += offset >> PAGE_CACHE_SHIFT; 1555 offset &= ~PAGE_CACHE_MASK; 1556 prev_offset = offset; 1557 1558 page_cache_release(page); 1559 written += ret; 1560 if (!iov_iter_count(iter)) 1561 goto out; 1562 if (ret < nr) { 1563 error = -EFAULT; 1564 goto out; 1565 } 1566 continue; 1567 1568 page_not_up_to_date: 1569 /* Get exclusive access to the page ... */ 1570 error = lock_page_killable(page); 1571 if (unlikely(error)) 1572 goto readpage_error; 1573 1574 page_not_up_to_date_locked: 1575 /* Did it get truncated before we got the lock? */ 1576 if (!page->mapping) { 1577 unlock_page(page); 1578 page_cache_release(page); 1579 continue; 1580 } 1581 1582 /* Did somebody else fill it already? */ 1583 if (PageUptodate(page)) { 1584 unlock_page(page); 1585 goto page_ok; 1586 } 1587 1588 readpage: 1589 /* 1590 * A previous I/O error may have been due to temporary 1591 * failures, eg. multipath errors. 1592 * PG_error will be set again if readpage fails. 1593 */ 1594 ClearPageError(page); 1595 /* Start the actual read. The read will unlock the page. */ 1596 error = mapping->a_ops->readpage(filp, page); 1597 1598 if (unlikely(error)) { 1599 if (error == AOP_TRUNCATED_PAGE) { 1600 page_cache_release(page); 1601 error = 0; 1602 goto find_page; 1603 } 1604 goto readpage_error; 1605 } 1606 1607 if (!PageUptodate(page)) { 1608 error = lock_page_killable(page); 1609 if (unlikely(error)) 1610 goto readpage_error; 1611 if (!PageUptodate(page)) { 1612 if (page->mapping == NULL) { 1613 /* 1614 * invalidate_mapping_pages got it 1615 */ 1616 unlock_page(page); 1617 page_cache_release(page); 1618 goto find_page; 1619 } 1620 unlock_page(page); 1621 shrink_readahead_size_eio(filp, ra); 1622 error = -EIO; 1623 goto readpage_error; 1624 } 1625 unlock_page(page); 1626 } 1627 1628 goto page_ok; 1629 1630 readpage_error: 1631 /* UHHUH! A synchronous read error occurred. Report it */ 1632 page_cache_release(page); 1633 goto out; 1634 1635 no_cached_page: 1636 /* 1637 * Ok, it wasn't cached, so we need to create a new 1638 * page.. 1639 */ 1640 page = page_cache_alloc_cold(mapping); 1641 if (!page) { 1642 error = -ENOMEM; 1643 goto out; 1644 } 1645 error = add_to_page_cache_lru(page, mapping, 1646 index, GFP_KERNEL); 1647 if (error) { 1648 page_cache_release(page); 1649 if (error == -EEXIST) { 1650 error = 0; 1651 goto find_page; 1652 } 1653 goto out; 1654 } 1655 goto readpage; 1656 } 1657 1658 out: 1659 ra->prev_pos = prev_index; 1660 ra->prev_pos <<= PAGE_CACHE_SHIFT; 1661 ra->prev_pos |= prev_offset; 1662 1663 *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset; 1664 file_accessed(filp); 1665 return written ? written : error; 1666 } 1667 1668 /* 1669 * Performs necessary checks before doing a write 1670 * @iov: io vector request 1671 * @nr_segs: number of segments in the iovec 1672 * @count: number of bytes to write 1673 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE 1674 * 1675 * Adjust number of segments and amount of bytes to write (nr_segs should be 1676 * properly initialized first). 
 * Returns appropriate error code that the caller should return, or zero
 * if the write should be allowed.
 */
int generic_segment_checks(const struct iovec *iov,
			unsigned long *nr_segs, size_t *count, int access_flags)
{
	unsigned long seg;
	size_t cnt = 0;
	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		*nr_segs = seg;
		cnt -= iv->iov_len;	/* This segment is no good */
		break;
	}
	*count = cnt;
	return 0;
}
EXPORT_SYMBOL(generic_segment_checks);

/**
 * generic_file_aio_read - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @pos:	current file position
 *
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;
	struct iov_iter i;

	count = 0;
	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;
	iov_iter_init(&i, iov, nr_segs, count, 0);

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		retval = filemap_write_and_wait_range(mapping, pos,
					pos + iov_length(iov, nr_segs) - 1);
		if (!retval) {
			retval = mapping->a_ops->direct_IO(READ, iocb,
							   iov, pos, nr_segs);
		}
		if (retval > 0) {
			*ppos = pos + retval;
			count -= retval;
			/*
			 * If we did a short DIO read we need to skip the
			 * section of the iov that we've already read data into.
			 */
			iov_iter_advance(&i, retval);
		}

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.
		 */
		if (retval < 0 || !count || *ppos >= size) {
			file_accessed(filp);
			goto out;
		}
	}

	retval = do_generic_file_read(filp, ppos, &i, retval);
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_aio_read);
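
/*
 * Illustrative sketch: the file_operations wiring a simple pagecache-based
 * filesystem would use for the read and mmap paths above.  "examplefs" and
 * examplefs_fsync are hypothetical (compare the earlier fsync sketch).
 */
#if 0
static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.mmap		= generic_file_mmap,
	.fsync		= examplefs_fsync,
};
#endif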

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	unsigned long ra_pages;
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra_pages = max_sane_readahead(ra->ra_pages);
	ra->start = max_t(long, 0, offset - ra_pages / 2);
	ra->size = ra_pages;
	ra->async_size = ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further..
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}

/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	loff_t size;
	int ret = 0;

	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (offset >= size >> PAGE_CACHE_SHIFT)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON_PAGE(page->index != offset, page);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/*
	 * Things didn't work out. Return an error to tell the mm layer so.
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct radix_tree_iter iter;
	void **slot;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	loff_t size;
	struct page *page;
	unsigned long address = (unsigned long) vmf->virtual_address;
	unsigned long addr;
	pte_t *pte;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
		if (iter.index > vmf->max_pgoff)
			break;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			goto next;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				break;
			else
				goto next;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		if (!PageUptodate(page) ||
				PageReadahead(page) ||
				PageHWPoison(page))
			goto skip;
		if (!trylock_page(page))
			goto skip;

		if (page->mapping != mapping || !PageUptodate(page))
			goto unlock;

		size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
		if (page->index >= size >> PAGE_CACHE_SHIFT)
			goto unlock;

		pte = vmf->pte + page->index - vmf->pgoff;
		if (!pte_none(*pte))
			goto unlock;

		if (file->f_ra.mmap_miss > 0)
			file->f_ra.mmap_miss--;
		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
		do_set_pte(vma, addr, page, pte, false, false);
		unlock_page(page);
		goto next;
unlock:
		unlock_page(page);
skip:
		page_cache_release(page);
next:
		if (iter.index == vmf->max_pgoff)
			break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(filemap_map_pages);

int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

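/*
 * Illustrative sketch (not part of the original file): filesystems that need
 * their own ->page_mkwrite (for example to reserve blocks before a shared
 * page becomes writable) commonly reuse filemap_fault() and
 * filemap_map_pages() directly instead of generic_file_vm_ops as a whole,
 * and install their vm_ops from their own ->mmap hook.  The "myfs" names,
 * including myfs_page_mkwrite(), are made up for the example; the block is
 * compiled out.
 */
#if 0	/* example only */
static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= myfs_page_mkwrite,	/* hypothetical fs-specific hook */
	.remap_pages	= generic_file_remap_pages,
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &myfs_file_vm_ops;
	return 0;
}
#endif
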
/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			page_cache_release(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		} else {
			page = wait_on_page_read(page);
		}
	}
	return page;
}

static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data, gfp);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	} else {
		page = wait_on_page_read(page);
		if (IS_ERR(page))
			return page;
	}
out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page and wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);

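/*
 * Illustrative sketch (not part of the original file): the usual way a
 * filesystem consumes read_cache_page() is via the read_mapping_page()
 * wrapper, which passes ->readpage as the filler.  myfs_get_dir_page() is a
 * made-up name; the point of the example is that these helpers hand back an
 * ERR_PTR() rather than NULL on failure, and the caller drops its reference
 * with page_cache_release() when done.  The block is compiled out.
 */
#if 0	/* example only */
static struct page *myfs_get_dir_page(struct inode *dir, pgoff_t n)
{
	/* read_mapping_page() supplies ->readpage as the filler for us */
	struct page *page = read_mapping_page(dir->i_mapping, n, NULL);

	/* on failure an ERR_PTR() is returned, never NULL */
	return page;
}
#endif
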
/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return do_read_cache_page(mapping, index, filler, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);

/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns an appropriate error code that the caller should return, or
 * zero in case the write should be allowed.
 */
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);

	if (unlikely(*pos < 0))
		return -EINVAL;

	if (!isblk) {
		/* FIXME: this is for backwards compatibility with 2.4 */
		if (file->f_flags & O_APPEND)
			*pos = i_size_read(inode);

		if (limit != RLIM_INFINITY) {
			if (*pos >= limit) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			if (*count > limit - (typeof(limit))*pos) {
				*count = limit - (typeof(limit))*pos;
			}
		}
	}

	/*
	 * LFS rule
	 */
	if (unlikely(*pos + *count > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (*pos >= MAX_NON_LFS) {
			return -EFBIG;
		}
		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
			*count = MAX_NON_LFS - (unsigned long)*pos;
		}
	}

	/*
	 * Are we about to exceed the fs block limit?
	 *
	 * If we have written data it becomes a short write. If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus' frestrict idea will clean these up nicely.
	 */
	if (likely(!isblk)) {
		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
			if (*count || *pos > inode->i_sb->s_maxbytes) {
				return -EFBIG;
			}
			/* zero-length writes at ->s_maxbytes are OK */
		}

		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
			*count = inode->i_sb->s_maxbytes - *pos;
	} else {
#ifdef CONFIG_BLOCK
		loff_t isize;
		if (bdev_read_only(I_BDEV(inode)))
			return -EPERM;
		isize = i_size_read(inode);
		if (*pos >= isize) {
			if (*count || *pos > isize)
				return -ENOSPC;
		}

		if (*pos + *count > isize)
			*count = isize - *pos;
#else
		return -EPERM;
#endif
	}
	return 0;
}
EXPORT_SYMBOL(generic_write_checks);

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);

ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long *nr_segs, loff_t pos,
		size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;
	size_t write_len;
	pgoff_t end;

	if (count != ocount)
		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

	write_len = iov_length(iov, *nr_segs);
	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;

	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data. We invalidate clean cached pages from the region we're
	 * about to write. We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		/*
		 * If a page cannot be invalidated, return 0 to fall back
		 * to buffered write.
		 */
		if (written) {
			if (written == -EBUSY)
				return 0;
			goto out;
		}
	}

	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing. Either one is a pretty crazy thing to do,
	 * so we don't support it 100%. If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
	}

	if (written > 0) {
		pos += written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	struct page *page;
	int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT;

	if (flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;

	page = pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping),
			GFP_KERNEL);
	if (page)
		wait_for_stable_page(page);

	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}
	} while (iov_iter_count(i));

	return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);

/**
 * __generic_file_aio_write - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @iov:	vector with data to write
 * @nr_segs:	number of segments in the vector
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */
ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				 unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space * mapping = file->f_mapping;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	ssize_t err;
	ssize_t status;
	struct iov_iter from;

	ocount = 0;
	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		return err;

	count = ocount;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	iov_iter_init(&from, iov, nr_segs, count, 0);

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (unlikely(file->f_flags & O_DIRECT)) {
		loff_t endbyte;

		written = generic_file_direct_write(iocb, iov, &from.nr_segs, pos,
							count, ocount);
		if (written < 0 || written == count)
			goto out;
		iov_iter_advance(&from, written);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		pos += written;
		count -= written;

		status = generic_perform_write(file, &from, pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero. Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0) && !written) {
			err = status;
			goto out;
		}
		iocb->ki_pos = pos + status;
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
		if (err == 0) {
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, &from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_aio_write);

/**
 * generic_file_aio_write - write data to a file
 * @iocb:	IO state structure
 * @iov:	vector with data to write
 * @nr_segs:	number of segments in the vector
 * @pos:	position in file where to write
 *
 * This is a wrapper around __generic_file_aio_write() to be used by most
 * filesystems. It takes care of syncing the file in case of an O_SYNC write
 * and acquires i_mutex as needed.
 */
ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_aio_write(iocb, iov, nr_segs);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page:	the page which the kernel is trying to free
 * @gfp_mask:	memory allocation flags (and I/O mode)
 *
 * The address_space is asked to try to release any data held against the page
 * (presumably at page->private). If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
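
/*
 * Illustrative sketch (not part of the original file): a conventional disk
 * filesystem of this kernel generation wires the generic entry points above
 * straight into its file_operations, so both the buffered and the O_DIRECT
 * write paths implemented in this file are reached through ->aio_write, and
 * faults on mapped files go through generic_file_vm_ops.  The "myfs" name is
 * made up for the example; the block is compiled out.
 */
#if 0	/* example only */
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
};
#endif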