/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock	(page_remove_rmap->mem_cgroup_begin_page_stat)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock            (memory_failure, collect_procs_ao)
 */
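/*
 * page_cache_tree_delete - remove @page from the mapping's radix tree,
 * replacing it with @shadow (a workingset eviction entry) if one is given,
 * and keep the per-node page/shadow counts and shadow-node tracking in sync.
 */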
static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked.  The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock and
 * mem_cgroup_begin_page_stat().
 */
void __delete_from_page_cache(struct page *page, void *shadow,
			      struct mem_cgroup *memcg)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * At this point page must be either written or cleaned by truncate.
	 * Dirty page here signals a bug and loss of unwritten data.
	 *
	 * This fixes dirty accounting after removing the page entirely but
	 * leaves PageDirty set: it has no effect for truncated page and
	 * anyway will be cleared before returning page into buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, memcg,
				     inode_to_wb(mapping->host));
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct mem_cgroup *memcg;
	unsigned long flags;

	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;

	memcg = mem_cgroup_begin_page_stat(page);
	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL, memcg);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	mem_cgroup_end_page_stat(memcg);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}
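/**
 * filemap_fdatawrite - start writeback on all of a mapping's dirty pages
 * @mapping: address space structure to write
 *
 * This is a data-integrity (WB_SYNC_ALL) writeback of the entire mapping;
 * it starts I/O against the dirty pages but does not wait for completion.
 */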
int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret2, ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);
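/**
 * filemap_write_and_wait - write out and wait on all of a mapping's dirty pages
 * @mapping: the address_space for the pages
 *
 * Start writeback on the whole mapping and wait for it to complete.  If
 * starting writeback fails with -EIO the wait is skipped, but any error
 * bits pending on the mapping are still reported.
 */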
int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
							   lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		struct mem_cgroup *memcg;
		unsigned long flags;

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		memcg = mem_cgroup_begin_page_stat(old);
		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(old, NULL, memcg);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_end_page_stat(memcg);
		mem_cgroup_migrate(old, new, true);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
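/*
 * page_cache_tree_insert - insert @page into the mapping's radix tree at
 * page->index.  If a shadow entry (from a previous eviction) occupies the
 * slot, it is replaced and handed back through @shadowp so the caller can
 * decide whether to re-activate the page.
 */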
static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;
		if (shadowp)
			*shadowp = p;
		mapping->nrshadows--;
		if (node)
			workingset_node_shadows_dec(node);
	}
	radix_tree_replace_slot(slot, page);
	mapping->nrpages++;
	if (node) {
		workingset_node_pages_inc(node);
		/*
		 * Don't track node that contains actual pages.
		 *
		 * Avoid acquiring the list_lru lock if already
		 * untracked.  The list_empty() test is safe as
		 * node->private_list is protected by
		 * mapping->tree_lock.
		 */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	}
	return 0;
}

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg);
		return error;
	}

	page_cache_get(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg);
	page_cache_release(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache.  It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);
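/**
 * add_to_page_cache_lru - add a page to the pagecache and the LRU
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * Like add_to_page_cache_locked(), except the page is locked here and is
 * also placed on the LRU.  A page that was only recently evicted (as
 * recorded by a shadow entry) is re-activated.
 */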
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__set_page_locked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__clear_page_locked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 */
		if (shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
EXPORT_SYMBOL(page_waitqueue);

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io, TASK_KILLABLE);
}

int wait_on_page_bit_killable_timeout(struct page *page,
				      int bit_nr, unsigned long timeout)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	wait.key.timeout = jiffies + timeout;
	if (!test_bit(bit_nr, &page->flags))
		return 0;
	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io_timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_atomic();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, int rw, int err)
{
	if (rw == READ) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else { /* rw == WRITE */
		if (err) {
			SetPageError(page);
			if (page->mapping)
				mapping_set_error(page->mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
			   TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
				  bit_wait_io, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though we return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);

/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);

/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: FGP flags
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * FGP flags modify how the page is returned.
 *
 * FGP_ACCESSED: the page will be marked accessed
 * FGP_LOCK: the page is returned locked
 * FGP_CREAT: If page is not present then a new page is allocated using
 *		@gfp_mask and added to the page cache and the VM's LRU
 *		list. The page is returned locked and with an increased
 *		refcount. Otherwise, %NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
	int fgp_flags, gfp_t gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				page_cache_release(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp_mask &= ~__GFP_FS;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so avoid atomic mark_page_accessed later */
		if (fgp_flags & FGP_ACCESSED)
			__SetPageReferenced(page);

		err = add_to_page_cache_lru(page, mapping, offset,
					    gfp_mask & GFP_RECLAIM_MASK);
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);
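/*
 * A typical caller that wants the page created, locked and marked
 * accessed would do something like this (a sketch, not a caller in
 * this file):
 *
 *	page = pagecache_get_page(mapping, index,
 *				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *				  mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 */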
/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping.  The entries are placed at
 * @entries.  find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(iter.index);
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* The hole, there is no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.  We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a sizable window for page
			 * reclaim to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
				      struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @iter:	data destination
 * @written:	already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
		struct iov_iter *iter, ssize_t written)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error = 0;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
							offset, iter->count))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */

		ret = copy_page_to_iter(page, offset, nr, iter);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		written += ret;
		if (!iov_iter_count(iter))
			goto out;
		if (ret < nr) {
			error = -EFAULT;
			goto out;
		}
		continue;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				error = 0;
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping, index,
				GFP_KERNEL & mapping_gfp_mask(mapping));
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST) {
				error = 0;
				goto find_page;
			}
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
	return written ? written : error;
}

/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iter:	destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;
	loff_t pos = *ppos;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;
		size_t count = iov_iter_count(iter);
		loff_t size;

		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		retval = filemap_write_and_wait_range(mapping, pos,
					pos + count - 1);
		if (!retval) {
			struct iov_iter data = *iter;
			retval = mapping->a_ops->direct_IO(iocb, &data, pos);
		}

		if (retval > 0) {
			*ppos = pos + retval;
			iov_iter_advance(iter, retval);
		}

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.  Buffered reads will not work for
		 * DAX files, so don't bother trying.
		 */
		if (retval < 0 || !iov_iter_count(iter) || *ppos >= size ||
		    IS_DAX(inode)) {
			file_accessed(file);
			goto out;
		}
	}

	retval = do_generic_file_read(file, ppos, iter, retval);
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset,
				GFP_KERNEL & mapping_gfp_mask(mapping));
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
1804 */ 1805 static void do_sync_mmap_readahead(struct vm_area_struct *vma, 1806 struct file_ra_state *ra, 1807 struct file *file, 1808 pgoff_t offset) 1809 { 1810 unsigned long ra_pages; 1811 struct address_space *mapping = file->f_mapping; 1812 1813 /* If we don't want any read-ahead, don't bother */ 1814 if (vma->vm_flags & VM_RAND_READ) 1815 return; 1816 if (!ra->ra_pages) 1817 return; 1818 1819 if (vma->vm_flags & VM_SEQ_READ) { 1820 page_cache_sync_readahead(mapping, ra, file, offset, 1821 ra->ra_pages); 1822 return; 1823 } 1824 1825 /* Avoid banging the cache line if not needed */ 1826 if (ra->mmap_miss < MMAP_LOTSAMISS * 10) 1827 ra->mmap_miss++; 1828 1829 /* 1830 * Do we miss much more than hit in this file? If so, 1831 * stop bothering with read-ahead. It will only hurt. 1832 */ 1833 if (ra->mmap_miss > MMAP_LOTSAMISS) 1834 return; 1835 1836 /* 1837 * mmap read-around 1838 */ 1839 ra_pages = max_sane_readahead(ra->ra_pages); 1840 ra->start = max_t(long, 0, offset - ra_pages / 2); 1841 ra->size = ra_pages; 1842 ra->async_size = ra_pages / 4; 1843 ra_submit(ra, mapping, file); 1844 } 1845 1846 /* 1847 * Asynchronous readahead happens when we find the page and PG_readahead, 1848 * so we want to possibly extend the readahead further.. 1849 */ 1850 static void do_async_mmap_readahead(struct vm_area_struct *vma, 1851 struct file_ra_state *ra, 1852 struct file *file, 1853 struct page *page, 1854 pgoff_t offset) 1855 { 1856 struct address_space *mapping = file->f_mapping; 1857 1858 /* If we don't want any read-ahead, don't bother */ 1859 if (vma->vm_flags & VM_RAND_READ) 1860 return; 1861 if (ra->mmap_miss > 0) 1862 ra->mmap_miss--; 1863 if (PageReadahead(page)) 1864 page_cache_async_readahead(mapping, ra, file, 1865 page, offset, ra->ra_pages); 1866 } 1867 1868 /** 1869 * filemap_fault - read in file data for page fault handling 1870 * @vma: vma in which the fault was taken 1871 * @vmf: struct vm_fault containing details of the fault 1872 * 1873 * filemap_fault() is invoked via the vma operations vector for a 1874 * mapped memory region to read in file data during a page fault. 1875 * 1876 * The goto's are kind of ugly, but this streamlines the normal case of having 1877 * it in the page cache, and handles the special cases reasonably without 1878 * having a lot of duplicated code. 1879 * 1880 * vma->vm_mm->mmap_sem must be held on entry. 1881 * 1882 * If our return value has VM_FAULT_RETRY set, it's because 1883 * lock_page_or_retry() returned 0. 1884 * The mmap_sem has usually been released in this case. 1885 * See __lock_page_or_retry() for the exception. 1886 * 1887 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem 1888 * has not been released. 1889 * 1890 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. 1891 */ 1892 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1893 { 1894 int error; 1895 struct file *file = vma->vm_file; 1896 struct address_space *mapping = file->f_mapping; 1897 struct file_ra_state *ra = &file->f_ra; 1898 struct inode *inode = mapping->host; 1899 pgoff_t offset = vmf->pgoff; 1900 struct page *page; 1901 loff_t size; 1902 int ret = 0; 1903 1904 size = round_up(i_size_read(inode), PAGE_CACHE_SIZE); 1905 if (offset >= size >> PAGE_CACHE_SHIFT) 1906 return VM_FAULT_SIGBUS; 1907 1908 /* 1909 * Do we have something in the page cache already? 
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON_PAGE(page->index != offset, page);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);
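/**
 * filemap_map_pages - map already-cached pages around a fault address
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * Walk the page cache from vmf->pgoff to vmf->max_pgoff and map any
 * uptodate pages it finds into the page tables, without starting I/O;
 * pages under readahead or poisoned pages are skipped.
 */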

void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct radix_tree_iter iter;
	void **slot;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	loff_t size;
	struct page *page;
	unsigned long address = (unsigned long) vmf->virtual_address;
	unsigned long addr;
	pte_t *pte;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
		if (iter.index > vmf->max_pgoff)
			break;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			goto next;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				break;
			else
				goto next;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		if (!PageUptodate(page) ||
				PageReadahead(page) ||
				PageHWPoison(page))
			goto skip;
		if (!trylock_page(page))
			goto skip;

		if (page->mapping != mapping || !PageUptodate(page))
			goto unlock;

		size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
		if (page->index >= size >> PAGE_CACHE_SHIFT)
			goto unlock;

		pte = vmf->pte + page->index - vmf->pgoff;
		if (!pte_none(*pte))
			goto unlock;

		if (file->f_ra.mmap_miss > 0)
			file->f_ra.mmap_miss--;
		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
		do_set_pte(vma, addr, page, pte, false, false);
		unlock_page(page);
		goto next;
unlock:
		unlock_page(page);
skip:
		page_cache_release(page);
next:
		if (iter.index == vmf->max_pgoff)
			break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(filemap_map_pages);

int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}
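
/*
 * Illustrative sketch only (hypothetical "examplefs", not part of this file):
 * a filesystem that relies on the generic paths above usually just points
 * ->mmap at generic_file_mmap(), e.g.
 *
 *	const struct file_operations examplefs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *		.fsync		= generic_file_fsync,
 *	};
 */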

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			page_cache_release(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		} else {
			page = wait_on_page_read(page);
		}
	}
	return page;
}

static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data, gfp);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	} else {
		page = wait_on_page_read(page);
		if (IS_ERR(page))
			return page;
	}
out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping: the page's address_space
 * @index: the page index
 * @filler: function to perform the read
 * @data: first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page and wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);
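
/*
 * Illustrative sketch only (hypothetical "examplefs", not part of this file):
 * a typical caller reads a metadata page through the page cache with a
 * filler that wraps its ->readpage, roughly:
 *
 *	static int examplefs_filler(void *data, struct page *page)
 *	{
 *		return examplefs_readpage(data, page);
 *	}
 *
 *	page = read_cache_page(mapping, index, examplefs_filler, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	page_cache_release(page);
 *
 * The caller owns a reference on the returned, uptodate page and must drop
 * it when done.
 */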

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return do_read_cache_page(mapping, index, filler, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);
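
/*
 * Illustrative sketch only (not part of this file): callers that must not
 * recurse into filesystem reclaim while holding fs locks typically pass a
 * GFP_NOFS mask here, e.g.
 *
 *	page = read_cache_page_gfp(mapping, index, GFP_NOFS);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 */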

/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns the number of bytes that may be written (possibly after
 * truncating @from), zero if nothing should be written, or a negative
 * error code that the caller should return.
 */
inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);
	loff_t pos;

	if (!iov_iter_count(from))
		return 0;

	/* FIXME: this is for backwards compatibility with 2.4 */
	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	pos = iocb->ki_pos;

	if (limit != RLIM_INFINITY) {
		if (iocb->ki_pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		iov_iter_truncate(from, limit - (unsigned long)pos);
	}

	/*
	 * LFS rule
	 */
	if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (pos >= MAX_NON_LFS)
			return -EFBIG;
		iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write. If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus frestrict idea will clean these up nicely..
	 */
	if (unlikely(pos >= inode->i_sb->s_maxbytes))
		return -EFBIG;

	iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);
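
/*
 * Illustrative sketch only (not part of this file): the two helpers above
 * are used as a pair by code that copies data into the page cache outside
 * of the normal write path, roughly:
 *
 *	status = pagecache_write_begin(NULL, mapping, pos, len, 0,
 *				       &page, &fsdata);
 *	if (status)
 *		return status;
 *	... modify the bytes at "pos" within the locked page ...
 *	status = pagecache_write_end(NULL, mapping, pos, len, len,
 *				     page, fsdata);
 *
 * (compare generic_cont_expand_simple() in fs/buffer.c).
 */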

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;
	size_t write_len;
	pgoff_t end;
	struct iov_iter data;

	write_len = iov_iter_count(from);
	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;

	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data. We invalidate clean cached pages from the region we're
	 * about to write. We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		/*
		 * If a page can not be invalidated, return 0 to fall back
		 * to buffered write.
		 */
		if (written) {
			if (written == -EBUSY)
				return 0;
			goto out;
		}
	}

	data = *from;
	written = mapping->a_ops->direct_IO(iocb, &data, pos);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing. Either one is a pretty crazy thing to do,
	 * so we don't support it 100%. If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT, end);
	}

	if (written > 0) {
		pos += written;
		iov_iter_advance(from, written);
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	struct page *page;
	int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT;

	if (flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;

	page = pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
	if (page)
		wait_for_stable_page(page);

	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (!iter_is_iovec(i))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_count(i));

again:
		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);
		/*
		 * 'page' is now locked. If we are trying to copy from a
		 * mapping of 'page' in userspace, the copy might fault and
		 * would need PageUptodate() to complete. But, page can not be
		 * made Uptodate without acquiring the page lock, which we hold.
		 * Deadlock. Avoid with pagefault_disable(). Fix up below with
		 * iov_iter_fault_in_readable().
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_single_seg_count(i));
			/*
			 * This is the fallback to recover if the copy from
			 * userspace above faults.
			 */
			if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
				status = -EFAULT;
				break;
			}
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}
	} while (iov_iter_count(i));

	return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);
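
/*
 * Illustrative sketch only (hypothetical "examplefs", not part of this file):
 * the ->write_begin / ->write_end pair that generic_perform_write() drives
 * is commonly implemented on top of the buffer layer helpers, roughly:
 *
 *	static int examplefs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, flags, pagep,
 *					 examplefs_get_block);
 *	}
 *
 * with generic_write_end() often used directly as ->write_end.
 */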

/**
 * __generic_file_write_iter - write data to a file
 * @iocb: IO state structure (file, offset, etc.)
 * @from: iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space * mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written = 0;
	ssize_t err;
	ssize_t status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from, iocb->ki_pos);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes. Some filesystems do this for writes to
		 * holes, for example. For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero. Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);

/**
 * generic_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC file
 * and acquires i_mutex as needed.
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private). If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
 *
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
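
/*
 * Illustrative sketch only (hypothetical "examplefs", not part of this file):
 * a ->releasepage implementation typically refuses to drop a page whose
 * private data is still in use and otherwise falls back to the buffer
 * layer, roughly:
 *
 *	static int examplefs_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		if (PagePrivate(page) && examplefs_page_busy(page))
 *			return 0;
 *		return try_to_free_buffers(page);
 *	}
 *
 * "examplefs_page_busy" is a hypothetical predicate for fs-private state.
 */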