shmem.c: 289733ed456f7c1cbdb1f1ca58312f77c239953b vs 047fe3605235888f3ebcda0c728cb31937eadfe6

Shared lines appear once below. Lines unique to one version are set off under
[289733ed456f] or [047fe3605235], with [common] marking the return to shared
text; a hunk that diverges throughout is shown once per version.
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.

--- 669 unchanged lines hidden ---

    gfp = mapping_gfp_mask(mapping);
    if (shmem_should_replace_page(*pagep, gfp)) {
        mutex_unlock(&shmem_swaplist_mutex);
        error = shmem_replace_page(pagep, gfp, info, index);
        mutex_lock(&shmem_swaplist_mutex);
        /*
         * We needed to drop mutex to make that restrictive page
[289733ed456f]
         * allocation, but the inode might have been freed while we
         * dropped it: although a racing shmem_evict_inode() cannot
         * complete without emptying the radix_tree, our page lock
         * on this swapcache page is not enough to prevent that -
         * free_swap_and_cache() of our swap entry will only
         * trylock_page(), removing swap from radix_tree whatever.
         *
         * We must not proceed to shmem_add_to_page_cache() if the
         * inode has been freed, but of course we cannot rely on
         * inode or mapping or info to check that. However, we can
         * safely check if our swap entry is still in use (and here
         * it can't have got reused for another page): if it's still
         * in use, then the inode cannot have been freed yet, and we
         * can safely proceed (if it's no longer in use, that tells
         * nothing about the inode, but we don't need to unuse swap).
[047fe3605235]
         * allocation; but the inode might already be freed by now,
         * and we cannot refer to inode or mapping or info to check.
         * However, we do hold page lock on the PageSwapCache page,
         * so can check if that still has our reference remaining.
[common]
         */
        if (!page_swapcount(*pagep))
            error = -ENOENT;
    }

    /*
     * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
     * but also to hold up shmem_evict_inode(): so inode cannot be freed

--- 27 unchanged lines hidden ---
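The "We needed to drop mutex" comment above captures a pattern worth noting:
a mutex is dropped to do blocking work, and after it is retaken the code
revalidates through an independent reference count (page_swapcount()) rather
than through pointers that may have gone stale. Below is a minimal userspace
sketch of that pattern, using pthreads; every name in it (struct obj,
refresh_obj, list_mutex) is hypothetical, not a kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical object: refcount plays the role of page_swapcount(). */
struct obj {
    atomic_int refcount;
    int payload;
};

static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;

static int refresh_obj(struct obj *o)
{
    void *buf;

    pthread_mutex_lock(&list_mutex);

    /* The allocation may block, so it must not run under the mutex. */
    pthread_mutex_unlock(&list_mutex);
    buf = malloc(4096);                /* stands in for the page allocation */
    pthread_mutex_lock(&list_mutex);

    /*
     * While the mutex was dropped the object's owner may have gone away,
     * so anything reached through the owner is off limits.  Revalidate
     * through the one fact still reliably held: is our reference still
     * accounted for?
     */
    if (!buf || atomic_load(&o->refcount) == 0) {
        pthread_mutex_unlock(&list_mutex);
        free(buf);
        return -1;                     /* like the -ENOENT above */
    }

    o->payload++;                      /* safe: object still in use */
    pthread_mutex_unlock(&list_mutex);
    free(buf);
    return 0;
}

int main(void)
{
    struct obj o = { .payload = 0 };

    atomic_store(&o.refcount, 1);
    printf("refresh_obj: %d\n", refresh_obj(&o));
    return 0;
}

As in the kernel code above, a zero count says nothing about the owner; it
only says the update is no longer needed, which is all the caller must know.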

{
    struct list_head *this, *next;
    struct shmem_inode_info *info;
    int found = 0;
    int error = 0;

    /*
     * There's a faint possibility that swap page was replaced before
[289733ed456f]
     * caller locked it: caller will come back later with the right page.
[047fe3605235]
     * caller locked it: it will come back later with the right page.
[common]
     */
[289733ed456f]
    if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
[047fe3605235]
    if (unlikely(!PageSwapCache(page)))
[common]
        goto out;

    /*
     * Charge page using GFP_KERNEL while we can wait, before taking
     * the shmem_swaplist_mutex which might hold up shmem_writepage().
     * Charged back to the user (not to caller) when swap account is used.
     */
    error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);

--- 246 unchanged lines hidden ---
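The charge comment just above is also a lock-ordering rule: the work that
can sleep (the GFP_KERNEL charge) happens before shmem_swaplist_mutex is
taken, because shmem_writepage() may be stuck waiting on that mutex. A small
sketch of the same ordering; charge_may_block() and swaplist_mutex are
hypothetical stand-ins, not kernel interfaces.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t swaplist_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a charge or allocation that may sleep. */
static void *charge_may_block(size_t size)
{
    return malloc(size);
}

static int add_to_list(void)
{
    /* Do the sleeping work first, while nobody is waiting on us... */
    void *charge = charge_may_block(4096);

    if (!charge)
        return -1;

    /* ...then hold the mutex only for the short non-blocking part. */
    pthread_mutex_lock(&swaplist_mutex);
    /* list manipulation would go here */
    pthread_mutex_unlock(&swaplist_mutex);

    free(charge);
    return 0;
}

int main(void)
{
    printf("add_to_list: %d\n", add_to_list());
    return 0;
}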

[289733ed456f]
    /*
     * We have arrived here because our zones are constrained, so don't
     * limit chance of success by further cpuset and node constraints.
     */
    gfp &= ~GFP_CONSTRAINT_MASK;
    newpage = shmem_alloc_page(gfp, info, index);
    if (!newpage)
        return -ENOMEM;

    page_cache_get(newpage);
    copy_highpage(newpage, oldpage);
    flush_dcache_page(newpage);

    __set_page_locked(newpage);
    SetPageUptodate(newpage);
    SetPageSwapBacked(newpage);
    set_page_private(newpage, swap_index);
    SetPageSwapCache(newpage);

    /*
     * Our caller will very soon move newpage out of swapcache, but it's
     * a nice clean interface for us to replace oldpage by newpage there.
     */
    spin_lock_irq(&swap_mapping->tree_lock);
    error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
                                     newpage);
    if (!error) {
        __inc_zone_page_state(newpage, NR_FILE_PAGES);
        __dec_zone_page_state(oldpage, NR_FILE_PAGES);
    }
    spin_unlock_irq(&swap_mapping->tree_lock);

    if (unlikely(error)) {
        /*
         * Is this possible? I think not, now that our callers check
         * both PageSwapCache and page_private after getting page lock;
         * but be defensive. Reverse old to newpage for clear and free.
         */
        oldpage = newpage;
    } else {
        mem_cgroup_replace_page_cache(oldpage, newpage);
        lru_cache_add_anon(newpage);
        *pagep = newpage;
    }

    ClearPageSwapCache(oldpage);
    set_page_private(oldpage, 0);

    unlock_page(oldpage);
    page_cache_release(oldpage);
    page_cache_release(oldpage);
    return error;

[047fe3605235]
    /*
     * We have arrived here because our zones are constrained, so don't
     * limit chance of success by further cpuset and node constraints.
     */
    gfp &= ~GFP_CONSTRAINT_MASK;
    newpage = shmem_alloc_page(gfp, info, index);
    if (!newpage)
        return -ENOMEM;
    VM_BUG_ON(shmem_should_replace_page(newpage, gfp));

    *pagep = newpage;
    page_cache_get(newpage);
    copy_highpage(newpage, oldpage);

    VM_BUG_ON(!PageLocked(oldpage));
    __set_page_locked(newpage);
    VM_BUG_ON(!PageUptodate(oldpage));
    SetPageUptodate(newpage);
    VM_BUG_ON(!PageSwapBacked(oldpage));
    SetPageSwapBacked(newpage);
    VM_BUG_ON(!swap_index);
    set_page_private(newpage, swap_index);
    VM_BUG_ON(!PageSwapCache(oldpage));
    SetPageSwapCache(newpage);

    /*
     * Our caller will very soon move newpage out of swapcache, but it's
     * a nice clean interface for us to replace oldpage by newpage there.
     */
    spin_lock_irq(&swap_mapping->tree_lock);
    error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
                                     newpage);
    __inc_zone_page_state(newpage, NR_FILE_PAGES);
    __dec_zone_page_state(oldpage, NR_FILE_PAGES);
    spin_unlock_irq(&swap_mapping->tree_lock);
    BUG_ON(error);

    mem_cgroup_replace_page_cache(oldpage, newpage);
    lru_cache_add_anon(newpage);

    ClearPageSwapCache(oldpage);
    set_page_private(oldpage, 0);

    unlock_page(oldpage);
    page_cache_release(oldpage);
    page_cache_release(oldpage);
    return 0;
[common]
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache

--- 57 unchanged lines hidden ---
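Both versions of the shmem_replace_page() hunk above go through
shmem_radix_tree_replace(), which acts as a compare-and-replace under
tree_lock: the slot is switched to the new page only if it still holds the
expected old one. 289733ed456f checks the result; 047fe3605235 asserts it
with BUG_ON(). A sketch of that contract, with a plain array standing in for
the radix tree; all names here are hypothetical.

#include <pthread.h>
#include <stdio.h>

#define NSLOTS 16

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slots[NSLOTS];            /* stands in for the radix tree */

/* Replace slots[index] with new only if it still holds expected. */
static int slot_replace(unsigned int index, void *expected, void *new)
{
    int error = 0;

    pthread_mutex_lock(&tree_lock);
    if (slots[index] == expected)
        slots[index] = new;            /* stats would be updated here too */
    else
        error = -1;                    /* raced: slot changed under us */
    pthread_mutex_unlock(&tree_lock);
    return error;
}

int main(void)
{
    int oldpage, newpage;

    slots[3] = &oldpage;
    printf("replace: %d\n", slot_replace(3, &oldpage, &newpage));
    printf("stale replace: %d\n", slot_replace(3, &oldpage, &newpage));
    return 0;
}

The expected-value check is what lets the callers rely on their page-lock
revalidation: a racer that already switched the slot is detected here.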

            if (!page) {
                error = -ENOMEM;
                goto failed;
            }
        }

        /* We have to do this with page locked to prevent races */
        lock_page(page);
[289733ed456f]
        if (!PageSwapCache(page) || page_private(page) != swap.val ||
            page->mapping) {
[047fe3605235]
        if (!PageSwapCache(page) || page->mapping) {
[common]
            error = -EEXIST;           /* try again */
            goto failed;
        }
        if (!PageUptodate(page)) {
            error = -EIO;
            goto failed;
        }
        wait_on_page_writeback(page);

--- 453 unchanged lines hidden ---
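Both versions recheck the page after lock_page(), and 289733ed456f also
compares page_private() against the expected swap entry, because a swapcache
page can be freed and reused for a different entry between the unlocked
lookup and the lock. A userspace sketch of that lock-then-revalidate shape;
struct entry and lock_and_check() are hypothetical.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical cache entry: key and owner identify what it caches. */
struct entry {
    pthread_mutex_t lock;
    unsigned long key;                 /* like page_private() */
    void *owner;                       /* like page->mapping */
};

/* Lock an entry found by an unlocked lookup, then prove its identity. */
static int lock_and_check(struct entry *e, unsigned long expected_key)
{
    pthread_mutex_lock(&e->lock);
    if (e->key != expected_key || e->owner != NULL) {
        /* Entry was recycled while we slept: caller must retry. */
        pthread_mutex_unlock(&e->lock);
        return -1;                     /* like the -EEXIST "try again" */
    }
    /* Identity confirmed: safe to operate on the entry. */
    pthread_mutex_unlock(&e->lock);
    return 0;
}

int main(void)
{
    struct entry e = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .key = 42,
        .owner = NULL,
    };

    printf("match: %d\n", lock_and_check(&e, 42));
    printf("recycled: %d\n", lock_and_check(&e, 7));
    return 0;
}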

    struct partial_page partial[PIPE_DEF_BUFFERS];
    struct page *page;
    pgoff_t index, end_index;
    loff_t isize, left;
    int error, page_nr;
    struct splice_pipe_desc spd = {
        .pages = pages,
        .partial = partial,
[047fe3605235]
        .nr_pages_max = PIPE_DEF_BUFFERS,
[common]
        .flags = flags,
        .ops = &page_cache_pipe_buf_ops,
        .spd_release = spd_release_page,
    };

    isize = i_size_read(inode);
    if (unlikely(*ppos >= isize))
        return 0;

--- 72 unchanged lines hidden ---
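The .nr_pages_max field that appears only in 047fe3605235 above is a
snapshot: a limit that can change concurrently (the pipe's buffer count) is
sampled once into the descriptor, and later phases work from that one value,
including splice_shrink_spd(), which in that version takes only the
descriptor (see the diverging call near the end of this function). A sketch
of the snapshot pattern; live_limit, struct desc and run_operation() are
hypothetical.

#include <stdatomic.h>
#include <stdio.h>

/* A limit another thread may change at any time. */
static atomic_int live_limit = 16;

struct desc {
    int nr_max;                        /* snapshot taken at setup */
    int used;
};

static void run_operation(struct desc *d)
{
    /*
     * Every bound check uses the snapshot, never the live value, so all
     * phases of the operation agree on one consistent limit.
     */
    for (d->used = 0; d->used < d->nr_max; d->used++)
        ;
}

int main(void)
{
    struct desc d = { .nr_max = atomic_load(&live_limit), .used = 0 };

    run_operation(&d);
    printf("processed %d of max %d\n", d.used, d.nr_max);
    return 0;
}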

    }

    while (page_nr < nr_pages)
        page_cache_release(spd.pages[page_nr++]);

    if (spd.nr_pages)
        error = splice_to_pipe(pipe, &spd);

[289733ed456f]
    splice_shrink_spd(pipe, &spd);
[047fe3605235]
    splice_shrink_spd(&spd);
[common]

    if (error > 0) {
        *ppos += error;
        file_accessed(in);
    }
    return error;
}

--- 1406 unchanged lines hidden ---