--- swapfile.c (ec8acf20afb8534ed511f6613dd2226b9e301010)
+++ swapfile.c (9e16b7fb1d066d38d01fd57c449f2640c5d208cb)
 /*
  * linux/mm/swapfile.c
  *
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  * Swap reorganised 29.12.95, Stephen Tweedie
  */

 #include <linux/mm.h>

--- 860 unchanged lines hidden ---

 /*
  * No need to decide whether this PTE shares the swap entry with others,
  * just let do_wp_page work it out if a write is requested later - to
  * force COW, vm_page_prot omits write permission from any private vma.
  */
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct page *page)
 {
+	struct page *swapcache;
 	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
 	int ret = 1;
 
+	swapcache = page;
+	page = ksm_might_need_to_copy(page, vma, addr);
+	if (unlikely(!page))
+		return -ENOMEM;
+
 	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
 					GFP_KERNEL, &memcg)) {
 		ret = -ENOMEM;
 		goto out_nolock;
 	}
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
 		mem_cgroup_cancel_charge_swapin(memcg);
 		ret = 0;
 		goto out;
 	}
 
 	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
 	get_page(page);
 	set_pte_at(vma->vm_mm, addr, pte,
 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
-	page_add_anon_rmap(page, vma, addr);
+	if (page == swapcache)
+		page_add_anon_rmap(page, vma, addr);
+	else /* ksm created a completely new copy */
+		page_add_new_anon_rmap(page, vma, addr);
 	mem_cgroup_commit_charge_swapin(page, memcg);
 	swap_free(entry);
 	/*
 	 * Move the page to the active list so it is not
 	 * immediately swapped out again after swapon.
 	 */
 	activate_page(page);
 out:
 	pte_unmap_unlock(pte, ptl);
 out_nolock:
+	if (page != swapcache) {
+		unlock_page(page);
+		put_page(page);
+	}
 	return ret;
 }
 
 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end,
 				swp_entry_t entry, struct page *page)
 {
 	pte_t swp_pte = swp_entry_to_pte(entry);

--- 1618 unchanged lines hidden ---
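
The heart of this change is a copy-before-map pattern: unuse_pte() now
remembers the original swap-cache page in "swapcache", lets
ksm_might_need_to_copy() substitute a freshly allocated private copy when the
KSM page cannot simply be mapped at this address, and on every exit path
releases the copy rather than the original. Below is a minimal userspace
analogue of that pattern, offered only as an illustration: might_need_to_copy()
and install() are hypothetical stand-ins, not kernel API, and the "is_shared"
flag replaces the real KSM check.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Stand-in for ksm_might_need_to_copy(): return the page itself when it
 * can be reused, or a private copy when it is shared. */
static char *might_need_to_copy(char *page, int is_shared)
{
	char *copy;

	if (!is_shared)
		return page;
	copy = malloc(PAGE_SIZE);
	if (!copy)
		return NULL;		/* caller turns this into -ENOMEM */
	memcpy(copy, page, PAGE_SIZE);
	return copy;
}

static int install(char *page, int is_shared)
{
	char *swapcache = page;		/* remember the original */

	page = might_need_to_copy(page, is_shared);
	if (!page)
		return -1;

	/* ... map "page" into the address space here ... */
	printf("installed %s page\n",
	       page == swapcache ? "original" : "copied");

	/* Cleanup mirrors the out_nolock path above: drop the substitute
	 * only when a substitution actually happened. */
	if (page != swapcache)
		free(page);
	return 0;
}

int main(void)
{
	char *orig = calloc(1, PAGE_SIZE);

	if (!orig)
		return 1;
	install(orig, 0);	/* unshared: used directly */
	install(orig, 1);	/* shared: a private copy is made */
	free(orig);
	return 0;
}

Comparing against the saved pointer keeps the cleanup uniform: the kernel's
out_nolock path likewise unlocks and puts the page only when page != swapcache,
i.e. only when ksm_might_need_to_copy() handed back a new copy.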
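
The comment above unuse_pte() leans on copy-on-write in private mappings: the
PTE is built from vm_page_prot, which omits write permission for any private
vma, so the first write faults into do_wp_page(), which then decides whether to
reuse or copy the page. The program below demonstrates only the
userspace-visible side of that behaviour, namely that writes to a MAP_PRIVATE
mapping never reach the underlying file; it is an illustration, not kernel
code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	char tmpl[] = "/tmp/cow-demo-XXXXXX";
	char buf[9] = "";
	int fd = mkstemp(tmpl);

	if (fd < 0)
		return 1;
	/* Give the file one page of known content. */
	if (ftruncate(fd, 4096) < 0 || pwrite(fd, "original", 8, 0) != 8)
		return 1;

	/* Private mapping: stores must not be written back to the file. */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* The first store faults; the kernel hands us a private copy. */
	memcpy(p, "modified", 8);

	if (pread(fd, buf, 8, 0) != 8)
		return 1;
	printf("mapping: %.8s, file: %s\n", p, buf);

	munmap(p, 4096);
	close(fd);
	unlink(tmpl);
	return 0;
}

Expected output is "mapping: modified, file: original": the mapping sees the
new bytes while the file keeps the old ones, because the write was served from
a copied page.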