/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock	(page_remove_rmap->mem_cgroup_begin_page_stat)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock		(memory_failure, collect_procs_ao)
 */

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked.  The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock and
 * mem_cgroup_begin_page_stat().
 */
void __delete_from_page_cache(struct page *page, void *shadow,
			      struct mem_cgroup *memcg)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * At this point page must be either written or cleaned by truncate.
	 * Dirty page here signals a bug and loss of unwritten data.
	 *
	 * This fixes dirty accounting after removing the page entirely but
	 * leaves PageDirty set: it has no effect for truncated page and
	 * anyway will be cleared before returning page into buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, memcg,
				     inode_to_wb(mapping->host));
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct mem_cgroup *memcg;
	unsigned long flags;

	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;

	memcg = mem_cgroup_begin_page_stat(page);
	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL, memcg);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	mem_cgroup_end_page_stat(memcg);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);
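/*
 * Example (an illustrative sketch, not part of the original file): the
 * sync_mode argument is what separates "kick off writeback and move on"
 * from the data-integrity mode used by the filemap_fdatawrite*()
 * wrappers above.  A hypothetical helper taking either path:
 *
 *	static int example_clean_range(struct address_space *mapping,
 *				       loff_t start, loff_t end, bool wait)
 *	{
 *		return __filemap_fdatawrite_range(mapping, start, end,
 *				wait ? WB_SYNC_ALL : WB_SYNC_NONE);
 *	}
 *
 * mm/fadvise.c uses the WB_SYNC_NONE flavour to nudge writeback along
 * for POSIX_FADV_DONTNEED.
 */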
/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

static int __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	return ret;
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	int ret, ret2;

	ret = __filemap_fdatawait_range(mapping, start_byte, end_byte);
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
void filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return;

	__filemap_fdatawait_range(mapping, 0, i_size - 1);
}
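/*
 * Example (an illustrative sketch, not part of the original file): a
 * system-wide flusher such as sync(2) wants writeback to finish but
 * must not consume error state that a later fsync() should still see,
 * so it pairs the write side with the _keep_errors variant:
 *
 *	filemap_fdatawrite(mapping);
 *	filemap_fdatawait_keep_errors(mapping);
 *
 * An fsync() path calls filemap_fdatawait_range() instead and reports
 * the (now cleared) error to userspace itself.
 */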
/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Check error status of the address space
 * and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
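/*
 * Example (an illustrative sketch, not part of the original file): the
 * classic consumer of filemap_write_and_wait_range() is a filesystem's
 * ->fsync() implementation, which flushes the data pages before it
 * commits its own metadata.  "myfs_fsync" is a hypothetical name:
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		struct inode *inode = file_inode(file);
 *		int err;
 *
 *		err = filemap_write_and_wait_range(file->f_mapping,
 *						   start, end);
 *		if (err)
 *			return err;
 *		return sync_inode_metadata(inode, 1);
 *	}
 */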
/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		struct mem_cgroup *memcg;
		unsigned long flags;

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		memcg = mem_cgroup_begin_page_stat(old);
		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(old, NULL, memcg);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_end_page_stat(memcg);
		mem_cgroup_replace_page(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;
		if (shadowp)
			*shadowp = p;
		mapping->nrshadows--;
		if (node)
			workingset_node_shadows_dec(node);
	}
	radix_tree_replace_slot(slot, page);
	mapping->nrpages++;
	if (node) {
		workingset_node_pages_inc(node);
		/*
		 * Don't track node that contains actual pages.
		 *
		 * Avoid acquiring the list_lru lock if already
		 * untracked.  The list_empty() test is safe as
		 * node->private_list is protected by
		 * mapping->tree_lock.
		 */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	}
	return 0;
}

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg);
		return error;
	}

	page_cache_get(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg);
	page_cache_release(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache.  It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__set_page_locked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__clear_page_locked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 */
		if (shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
EXPORT_SYMBOL(page_waitqueue);

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io, TASK_KILLABLE);
}

int wait_on_page_bit_killable_timeout(struct page *page,
				      int bit_nr, unsigned long timeout)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	wait.key.timeout = jiffies + timeout;
	if (!test_bit(bit_nr, &page->flags))
		return 0;
	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io_timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);
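/*
 * Example (an illustrative sketch, not part of the original file): the
 * familiar helpers in <linux/pagemap.h> are thin wrappers around
 * wait_on_page_bit().  Conceptually:
 *
 *	static inline void example_wait_writeback(struct page *page)
 *	{
 *		if (PageWriteback(page))
 *			wait_on_page_bit(page, PG_writeback);
 *	}
 *
 * which matches what wait_on_page_writeback() does; wait_on_page_locked()
 * follows the same pattern for PG_locked.
 */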
/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_atomic();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, int rw, int err)
{
	if (rw == READ) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else { /* rw == WRITE */
		if (err) {
			SetPageError(page);
			if (page->mapping)
				mapping_set_error(page->mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
					bit_wait_io, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);
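/*
 * Example (an illustrative sketch, not part of the original file):
 * callers normally go through lock_page() from <linux/pagemap.h>, which
 * only falls back to __lock_page() on contention, and must recheck that
 * the page was not truncated while they slept:
 *
 *	lock_page(page);
 *	if (page->mapping != mapping) {
 *		unlock_page(page);
 *		page_cache_release(page);
 *		goto retry;
 *	}
 *
 * find_lock_entry() below implements exactly this lock-and-recheck
 * pattern.
 */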
/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);
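/*
 * Example (an illustrative sketch, not part of the original file):
 * readahead uses these helpers to size its windows.  A caller that
 * wants to know how many pages starting at @index are already cached
 * could write:
 *
 *	pgoff_t hole = page_cache_next_hole(mapping, index, max);
 *	pgoff_t cached = hole - index;
 *
 * cached is 0 when @index itself is a hole, subject to the wrap-around
 * caveats documented above.
 */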
/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);

/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);

/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: FGP flags
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * FGP flags modify how the page is returned.
 *
 * FGP_ACCESSED: the page will be marked accessed
 * FGP_LOCK: the page is returned locked
 * FGP_CREAT: if the page is not present, a new page is allocated using
 *   @gfp_mask and added to the page cache and the VM's LRU
 *   list. The page is returned locked and with an increased
 *   refcount. Otherwise, %NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
	int fgp_flags, gfp_t gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				page_cache_release(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp_mask &= ~__GFP_FS;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so avoid atomic mark_page_accessed later */
		if (fgp_flags & FGP_ACCESSED)
			__SetPageReferenced(page);

		err = add_to_page_cache_lru(page, mapping, offset,
				gfp_mask & GFP_RECLAIM_MASK);
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);
/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping.  The entries are placed at
 * @entries.  find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(iter.index);
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* A hole: there is no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.   We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a sizable window for page
			 * reclaim to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @iter:	data destination
 * @written:	already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
		struct iov_iter *iter, ssize_t written)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error = 0;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
							offset, iter->count))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */

		ret = copy_page_to_iter(page, offset, nr, iter);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		written += ret;
		if (!iov_iter_count(iter))
			goto out;
		if (ret < nr) {
			error = -EFAULT;
			goto out;
		}
		continue;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				error = 0;
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping, index,
				mapping_gfp_constraint(mapping, GFP_KERNEL));
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST) {
				error = 0;
				goto find_page;
			}
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
	return written ? written : error;
}

/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iter:	destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;
	loff_t pos = *ppos;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;
		size_t count = iov_iter_count(iter);
		loff_t size;

		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		retval = filemap_write_and_wait_range(mapping, pos,
					pos + count - 1);
		if (!retval) {
			struct iov_iter data = *iter;
			retval = mapping->a_ops->direct_IO(iocb, &data, pos);
		}

		if (retval > 0) {
			*ppos = pos + retval;
			iov_iter_advance(iter, retval);
		}

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.  Buffered reads will not work for
		 * DAX files, so don't bother trying.
		 */
		if (retval < 0 || !iov_iter_count(iter) || *ppos >= size ||
		    IS_DAX(inode)) {
			file_accessed(file);
			goto out;
		}
	}

	retval = do_generic_file_read(file, ppos, iter, retval);
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);
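/*
 * Example (an illustrative sketch, not part of the original file): a
 * filesystem whose reads go through the page cache typically just wires
 * this routine into its file_operations; "myfs" is a hypothetical name:
 *
 *	const struct file_operations myfs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 */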
#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset,
				mapping_gfp_constraint(mapping, GFP_KERNEL));
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
	ra->size = ra->ra_pages;
	ra->async_size = ra->ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further.
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}

/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * vma->vm_mm->mmap_sem must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because
 * lock_page_or_retry() returned 0.
 * The mmap_sem has usually been released in this case.
 * See __lock_page_or_retry() for the exception.
 *
 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
 * has not been released.
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 */
/**
 * filemap_fault - read in file data for page fault handling
 * @vma: vma in which the fault was taken
 * @vmf: struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * vma->vm_mm->mmap_sem must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because
 * lock_page_or_retry() returned 0.
 * The mmap_sem has usually been released in this case.
 * See __lock_page_or_retry() for the exception.
 *
 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
 * has not been released.
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	loff_t size;
	int ret = 0;

	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (offset >= size >> PAGE_CACHE_SHIFT)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON_PAGE(page->index != offset, page);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	/*
	 * Things didn't work out; shrink the readahead window and
	 * report the failure to the mm layer as VM_FAULT_SIGBUS.
	 */
	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);
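/*
 * Worked example for the i_size check above (illustrative numbers): for
 * a 5000-byte file with 4kB pages, i_size rounds up to 8192, so only
 * page indices 0 and 1 are valid; a fault on index 2 or beyond returns
 * VM_FAULT_SIGBUS, which is why touching a mapping past the last file
 * page raises SIGBUS instead of extending the file.
 */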
void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct radix_tree_iter iter;
	void **slot;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	loff_t size;
	struct page *page;
	unsigned long address = (unsigned long) vmf->virtual_address;
	unsigned long addr;
	pte_t *pte;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
		if (iter.index > vmf->max_pgoff)
			break;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			goto next;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				break;
			else
				goto next;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		if (!PageUptodate(page) ||
				PageReadahead(page) ||
				PageHWPoison(page))
			goto skip;
		if (!trylock_page(page))
			goto skip;

		if (page->mapping != mapping || !PageUptodate(page))
			goto unlock;

		size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
		if (page->index >= size >> PAGE_CACHE_SHIFT)
			goto unlock;

		pte = vmf->pte + page->index - vmf->pgoff;
		if (!pte_none(*pte))
			goto unlock;

		if (file->f_ra.mmap_miss > 0)
			file->f_ra.mmap_miss--;
		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
		do_set_pte(vma, addr, page, pte, false, false);
		unlock_page(page);
		goto next;
unlock:
		unlock_page(page);
skip:
		page_cache_release(page);
next:
		if (iter.index == vmf->max_pgoff)
			break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(filemap_map_pages);

int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}
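/*
 * Illustrative wiring (a sketch with a hypothetical name): a filesystem
 * routes ->mmap here to pick up the generic_file_vm_ops fault handlers
 * above:
 *
 *	const struct file_operations example_file_operations = {
 *		...
 *		.mmap	= generic_file_mmap,
 *	};
 */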
/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			page_cache_release(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		} else {
			page = wait_on_page_read(page);
		}
	}
	return page;
}

static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data, gfp);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	} else {
		page = wait_on_page_read(page);
		if (IS_ERR(page))
			return page;
	}
out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping: the page's address_space
 * @index: the page index
 * @filler: function to perform the read
 * @data: first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page and wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);
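/*
 * Illustrative use (a sketch; compare the read_mapping_page() wrapper in
 * <linux/pagemap.h>, which passes ->readpage as the filler):
 *
 *	struct page *page;
 *
 *	page = read_cache_page(mapping, index,
 *			(filler_t *)mapping->a_ops->readpage, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// ... use the now-uptodate page ...
 *	page_cache_release(page);
 */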
/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return do_read_cache_page(mapping, index, filler, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);
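/*
 * Illustrative use (a sketch): a filesystem reading through the page
 * cache while holding its own locks can avoid re-entering itself under
 * memory pressure by allocating with GFP_NOFS:
 *
 *	page = read_cache_page_gfp(mapping, index, GFP_NOFS);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 */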
/*
 * Performs necessary checks before doing a write
 *
 * Can adjust the write position or the amount of bytes to write.
 * Returns a negative error code that the caller should return, or the
 * number of bytes that may be written (which can be 0, e.g. for an
 * empty iov_iter).
 */
inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);
	loff_t pos;

	if (!iov_iter_count(from))
		return 0;

	/* FIXME: this is for backwards compatibility with 2.4 */
	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	pos = iocb->ki_pos;

	if (limit != RLIM_INFINITY) {
		if (iocb->ki_pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		iov_iter_truncate(from, limit - (unsigned long)pos);
	}

	/*
	 * LFS rule
	 */
	if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (pos >= MAX_NON_LFS)
			return -EFBIG;
		iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write. If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus' frestrict idea will clean these up nicely.
	 */
	if (unlikely(pos >= inode->i_sb->s_maxbytes))
		return -EFBIG;

	iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;
	size_t write_len;
	pgoff_t end;
	struct iov_iter data;

	write_len = iov_iter_count(from);
	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;

	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data. We invalidate clean cached pages from the region we're
	 * about to write. We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		/*
		 * If a page cannot be invalidated, return 0 to fall back
		 * to buffered write.
		 */
		if (written) {
			if (written == -EBUSY)
				return 0;
			goto out;
		}
	}

	data = *from;
	written = mapping->a_ops->direct_IO(iocb, &data, pos);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing. Either one is a pretty crazy thing to do,
	 * so we don't support it 100%. If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT, end);
	}

	if (written > 0) {
		pos += written;
		iov_iter_advance(from, written);
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
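/*
 * Worked example for the invalidation range above (illustrative
 * numbers): with 4kB pages, a direct write of write_len = 5000 bytes at
 * pos = 6000 gives end = (6000 + 5000 - 1) >> PAGE_CACHE_SHIFT = 2 and
 * pos >> PAGE_CACHE_SHIFT = 1, so pages 1 and 2 are invalidated around
 * the write while page 0 is untouched.
 */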
2469 */ 2470 struct page *grab_cache_page_write_begin(struct address_space *mapping, 2471 pgoff_t index, unsigned flags) 2472 { 2473 struct page *page; 2474 int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT; 2475 2476 if (flags & AOP_FLAG_NOFS) 2477 fgp_flags |= FGP_NOFS; 2478 2479 page = pagecache_get_page(mapping, index, fgp_flags, 2480 mapping_gfp_mask(mapping)); 2481 if (page) 2482 wait_for_stable_page(page); 2483 2484 return page; 2485 } 2486 EXPORT_SYMBOL(grab_cache_page_write_begin); 2487 2488 ssize_t generic_perform_write(struct file *file, 2489 struct iov_iter *i, loff_t pos) 2490 { 2491 struct address_space *mapping = file->f_mapping; 2492 const struct address_space_operations *a_ops = mapping->a_ops; 2493 long status = 0; 2494 ssize_t written = 0; 2495 unsigned int flags = 0; 2496 2497 /* 2498 * Copies from kernel address space cannot fail (NFSD is a big user). 2499 */ 2500 if (!iter_is_iovec(i)) 2501 flags |= AOP_FLAG_UNINTERRUPTIBLE; 2502 2503 do { 2504 struct page *page; 2505 unsigned long offset; /* Offset into pagecache page */ 2506 unsigned long bytes; /* Bytes to write to page */ 2507 size_t copied; /* Bytes copied from user */ 2508 void *fsdata; 2509 2510 offset = (pos & (PAGE_CACHE_SIZE - 1)); 2511 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2512 iov_iter_count(i)); 2513 2514 again: 2515 /* 2516 * Bring in the user page that we will copy from _first_. 2517 * Otherwise there's a nasty deadlock on copying from the 2518 * same page as we're writing to, without it being marked 2519 * up-to-date. 2520 * 2521 * Not only is this an optimisation, but it is also required 2522 * to check that the address is actually valid, when atomic 2523 * usercopies are used, below. 2524 */ 2525 if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 2526 status = -EFAULT; 2527 break; 2528 } 2529 2530 if (fatal_signal_pending(current)) { 2531 status = -EINTR; 2532 break; 2533 } 2534 2535 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2536 &page, &fsdata); 2537 if (unlikely(status < 0)) 2538 break; 2539 2540 if (mapping_writably_mapped(mapping)) 2541 flush_dcache_page(page); 2542 2543 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 2544 flush_dcache_page(page); 2545 2546 status = a_ops->write_end(file, mapping, pos, bytes, copied, 2547 page, fsdata); 2548 if (unlikely(status < 0)) 2549 break; 2550 copied = status; 2551 2552 cond_resched(); 2553 2554 iov_iter_advance(i, copied); 2555 if (unlikely(copied == 0)) { 2556 /* 2557 * If we were unable to copy any data at all, we must 2558 * fall back to a single segment length write. 2559 * 2560 * If we didn't fallback here, we could livelock 2561 * because not all segments in the iov can be copied at 2562 * once without a pagefault. 2563 */ 2564 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2565 iov_iter_single_seg_count(i)); 2566 goto again; 2567 } 2568 pos += copied; 2569 written += copied; 2570 2571 balance_dirty_pages_ratelimited(mapping); 2572 } while (iov_iter_count(i)); 2573 2574 return written ? written : status; 2575 } 2576 EXPORT_SYMBOL(generic_perform_write); 2577 2578 /** 2579 * __generic_file_write_iter - write data to a file 2580 * @iocb: IO state structure (file, offset, etc.) 2581 * @from: iov_iter with data to write 2582 * 2583 * This function does all the work needed for actually writing data to a 2584 * file. 
/**
 * __generic_file_write_iter - write data to a file
 * @iocb: IO state structure (file, offset, etc.)
 * @from: iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written = 0;
	ssize_t err;
	ssize_t status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from, iocb->ki_pos);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes. Some filesystems do this for writes to
		 * holes, for example. For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero. Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);
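/*
 * Illustrative caller (a sketch with hypothetical names): a filesystem
 * with its own locking follows the same pattern as the wrapper below,
 * substituting its lock for i_mutex:
 *
 *	static ssize_t example_write_iter(struct kiocb *iocb,
 *					  struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		example_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = __generic_file_write_iter(iocb, from);
 *		example_unlock(inode);
 *
 *		if (ret > 0) {
 *			ssize_t err = generic_write_sync(iocb->ki_filp,
 *					iocb->ki_pos - ret, ret);
 *			if (err < 0)
 *				ret = err;
 *		}
 *		return ret;
 *	}
 */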
2678 */ 2679 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2680 { 2681 struct file *file = iocb->ki_filp; 2682 struct inode *inode = file->f_mapping->host; 2683 ssize_t ret; 2684 2685 mutex_lock(&inode->i_mutex); 2686 ret = generic_write_checks(iocb, from); 2687 if (ret > 0) 2688 ret = __generic_file_write_iter(iocb, from); 2689 mutex_unlock(&inode->i_mutex); 2690 2691 if (ret > 0) { 2692 ssize_t err; 2693 2694 err = generic_write_sync(file, iocb->ki_pos - ret, ret); 2695 if (err < 0) 2696 ret = err; 2697 } 2698 return ret; 2699 } 2700 EXPORT_SYMBOL(generic_file_write_iter); 2701 2702 /** 2703 * try_to_release_page() - release old fs-specific metadata on a page 2704 * 2705 * @page: the page which the kernel is trying to free 2706 * @gfp_mask: memory allocation flags (and I/O mode) 2707 * 2708 * The address_space is to try to release any data against the page 2709 * (presumably at page->private). If the release was successful, return `1'. 2710 * Otherwise return zero. 2711 * 2712 * This may also be called if PG_fscache is set on a page, indicating that the 2713 * page is known to the local caching routines. 2714 * 2715 * The @gfp_mask argument specifies whether I/O may be performed to release 2716 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS). 2717 * 2718 */ 2719 int try_to_release_page(struct page *page, gfp_t gfp_mask) 2720 { 2721 struct address_space * const mapping = page->mapping; 2722 2723 BUG_ON(!PageLocked(page)); 2724 if (PageWriteback(page)) 2725 return 0; 2726 2727 if (mapping && mapping->a_ops->releasepage) 2728 return mapping->a_ops->releasepage(page, gfp_mask); 2729 return try_to_free_buffers(page); 2730 } 2731 2732 EXPORT_SYMBOL(try_to_release_page); 2733