memory.c: diff from 6f84981772535e670e4e2df051a672af229b6694 (old) to 7d4a8be0c4b2b7ffb367929d2b352651f083806b (new)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/mm/memory.c
4 *
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 */
7
8/*

--- 611 unchanged lines hidden ---

620 /*
621 * NOTE! We still have PageReserved() pages in the page tables.
622 * eg. VDSO mappings can cause them to exist.
623 */
624out:
625 return pfn_to_page(pfn);
626}
627
628struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
629 pte_t pte)
630{
631 struct page *page = vm_normal_page(vma, addr, pte);
632
633 if (page)
634 return page_folio(page);
635 return NULL;
636}
637
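The vm_normal_folio() helper above is new in this revision: it resolves the page behind a PTE with vm_normal_page() and hands back the containing folio via page_folio(). A hedged caller sketch follows; the function name example_touch_folio() is illustrative and not part of this file.

/* Illustrative only: using the new helper instead of open-coding
 * vm_normal_page() + page_folio() at a PTE-walk site. */
static void example_touch_folio(struct vm_area_struct *vma,
				unsigned long addr, pte_t pte)
{
	struct folio *folio = vm_normal_folio(vma, addr, pte);

	if (!folio)
		return;		/* special mapping or zero page: nothing to do */

	folio_mark_accessed(folio);
}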
628#ifdef CONFIG_TRANSPARENT_HUGEPAGE
629struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
630 pmd_t pmd)
631{
632 unsigned long pfn = pmd_pfn(pmd);
633
634 /*
635 * There is no pmd_special() but there may be special pmds, e.g.

--- 187 unchanged lines hidden ---

823 * exclusive entries currently only support private writable
824 * (ie. COW) mappings.
825 */
826 VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
827 if (try_restore_exclusive_pte(src_pte, src_vma, addr))
828 return -EBUSY;
829 return -ENOENT;
830 } else if (is_pte_marker_entry(entry)) {
831 /*
832 * We're copying the pgtable should only because dst_vma has
833 * uffd-wp enabled, do sanity check.
834 */
835 WARN_ON_ONCE(!userfaultfd_wp(dst_vma));
836 set_pte_at(dst_mm, addr, dst_pte, pte);
841 if (is_swapin_error_entry(entry) || userfaultfd_wp(dst_vma))
842 set_pte_at(dst_mm, addr, dst_pte, pte);
837 return 0;
838 }
839 if (!userfaultfd_wp(dst_vma))
840 pte = pte_swp_clear_uffd_wp(pte);
841 set_pte_at(dst_mm, addr, dst_pte, pte);
842 return 0;
843}
844
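The reworked marker branch above no longer copies every marker PTE at fork time: it keeps the entry only when it records a swapin error (which must not be lost) or when the destination VMA still has uffd-wp armed. As a reading aid, the same rule written as a hypothetical predicate, not a helper that exists in this file:

/* Hypothetical predicate mirroring the marker branch above. */
static inline bool sketch_copy_pte_marker(swp_entry_t entry,
					  struct vm_area_struct *dst_vma)
{
	/* swapin errors must survive fork; uffd-wp markers only if dst is armed */
	return is_swapin_error_entry(entry) || userfaultfd_wp(dst_vma);
}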

--- 32 unchanged lines hidden ---

877 lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
878 rss[mm_counter(new_page)]++;
879
880 /* All done, just insert the new page copy in the child */
881 pte = mk_pte(new_page, dst_vma->vm_page_prot);
882 pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
883 if (userfaultfd_pte_wp(dst_vma, *src_pte))
884 /* Uffd-wp needs to be delivered to dest pte as well */
885 pte = pte_wrprotect(pte_mkuffd_wp(pte));
891 pte = pte_mkuffd_wp(pte);
886 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
887 return 0;
888}
889
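The hunk above drops the explicit pte_wrprotect() around pte_mkuffd_wp(), and two later hunks in this diff make the same simplification, which implies the helper now write-protects the PTE itself. A sketch of what an architecture's definition would then look like; the x86 names pte_set_flags() and _PAGE_UFFD_WP are an assumption here, not something shown in this diff.

/* Assumed x86-style definition: marking a PTE uffd-wp also clears write access. */
static inline pte_t sketch_pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(pte_set_flags(pte, _PAGE_UFFD_WP));
}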
890/*
891 * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page
892 * is required to copy this pte.
893 */

--- 361 unchanged lines hidden ---

1255 * there could be a permission downgrade on the ptes of the
1256 * parent mm. And a permission downgrade will only happen if
1257 * is_cow_mapping() returns true.
1258 */
1259 is_cow = is_cow_mapping(src_vma->vm_flags);
1260
1261 if (is_cow) {
1262 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1263 0, src_vma, src_mm, addr, end);
1269 0, src_mm, addr, end);
1264 mmu_notifier_invalidate_range_start(&range);
1265 /*
1266 * Disabling preemption is not needed for the write side, as
1267 * the read side doesn't spin, but goes to the mmap_lock.
1268 *
1269 * Use the raw variant of the seqcount_t write API to avoid
1270 * lockdep complaining about preemptibility.
1271 */
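This hunk is one of several in the diff where mmu_notifier_range_init() loses its vma argument and now takes only the mm. Both call forms appear verbatim in this diff; the sketch below simply puts them side by side, with range, vma, start and end standing for the usual locals of such a caller.

/* Illustrative caller of the changed API; not a function from this file. */
static void sketch_notifier_range_init(struct mmu_notifier_range *range,
				       struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	/* old form (previous revision):
	 * mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
	 *			   start, end);
	 */
	mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				start, end);
}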

--- 119 unchanged lines hidden ---

1391 if (!PageAnon(page)) {
1392 if (pte_dirty(ptent)) {
1393 set_page_dirty(page);
1394 if (tlb_delay_rmap(tlb)) {
1395 delay_rmap = 1;
1396 force_flush = 1;
1397 }
1398 }
1399 if (pte_young(ptent) &&
1400 likely(!(vma->vm_flags & VM_SEQ_READ)))
1405 if (pte_young(ptent) && likely(vma_has_recency(vma)))
1401 mark_page_accessed(page);
1402 }
1403 rss[mm_counter(page)]--;
1404 if (!delay_rmap) {
1405 page_remove_rmap(page, vma, false);
1406 if (unlikely(page_mapcount(page) < 0))
1407 print_bad_pte(vma, addr, ptent, page);
1408 }
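The accessed-bit handling above now asks vma_has_recency() instead of testing VM_SEQ_READ directly, and the lru_gen_enter_fault() hunk near the end of this diff replaces a VM_SEQ_READ | VM_RAND_READ test the same way. A sketch of what such a predicate plausibly checks, reconstructed only from the flags the replaced code tested; the real helper is not shown in this diff and may look at more state.

/* Sketch reconstructed from the replaced checks; not the in-tree definition. */
static inline bool sketch_vma_has_recency(struct vm_area_struct *vma)
{
	/* sequential and random readahead hints both opt out of recency */
	return !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ));
}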

--- 264 unchanged lines hidden ---

1673 struct mmu_notifier_range range;
1674 struct zap_details details = {
1675 .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
1676 /* Careful - we need to zap private pages too! */
1677 .even_cows = true,
1678 };
1679 MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
1680
1681 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1686 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
1682 start_addr, end_addr);
1683 mmu_notifier_invalidate_range_start(&range);
1684 do {
1685 unmap_single_vma(tlb, vma, start_addr, end_addr, &details);
1686 } while ((vma = mas_find(&mas, end_addr - 1)) != NULL);
1687 mmu_notifier_invalidate_range_end(&range);
1688}
1689
1690/**
1691 * zap_page_range - remove user pages in a given range
1692 * @vma: vm_area_struct holding the applicable pages
1693 * @start: starting address of pages to zap
1694 * @size: number of bytes to zap
1695 *
1696 * Caller must protect the VMA list
1697 */
1698void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1699 unsigned long size)
1700{
1701 struct maple_tree *mt = &vma->vm_mm->mm_mt;
1702 unsigned long end = start + size;
1703 struct mmu_notifier_range range;
1704 struct mmu_gather tlb;
1705 MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
1706
1707 lru_add_drain();
1708 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1709 start, start + size);
1710 tlb_gather_mmu(&tlb, vma->vm_mm);
1711 update_hiwater_rss(vma->vm_mm);
1712 mmu_notifier_invalidate_range_start(&range);
1713 do {
1714 unmap_single_vma(&tlb, vma, start, range.end, NULL);
1715 } while ((vma = mas_find(&mas, end - 1)) != NULL);
1716 mmu_notifier_invalidate_range_end(&range);
1717 tlb_finish_mmu(&tlb);
1718}
1719
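zap_page_range() above is present only in the old revision; the new file keeps zap_page_range_single(), whose range must fit within one VMA, so multi-VMA callers need a different path. A hedged conversion sketch for a caller whose range is already known to lie inside a single VMA; the function name and the VM_WARN_ON() guard are illustrative, not taken from this diff.

/* Illustrative replacement for a single-VMA zap_page_range() call. */
static void sketch_zap_within_vma(struct vm_area_struct *vma,
				  unsigned long start, unsigned long size)
{
	VM_WARN_ON(start < vma->vm_start || start + size > vma->vm_end);
	zap_page_range_single(vma, start, size, NULL);
}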
1720/**
1721 * zap_page_range_single - remove user pages in a given range
1722 * @vma: vm_area_struct holding the applicable pages
1723 * @address: starting address of pages to zap
1724 * @size: number of bytes to zap
1725 * @details: details of shared cache invalidation
1726 *
1727 * The range must fit into one VMA.
1728 */
1729void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1730 unsigned long size, struct zap_details *details)
1731{
1732 const unsigned long end = address + size;
1733 struct mmu_notifier_range range;
1734 struct mmu_gather tlb;
1735
1736 lru_add_drain();
1737 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1712 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
1738 address, end);
1739 if (is_vm_hugetlb_page(vma))
1740 adjust_range_if_pmd_sharing_possible(vma, &range.start,
1741 &range.end);
1742 tlb_gather_mmu(&tlb, vma->vm_mm);
1743 update_hiwater_rss(vma->vm_mm);
1744 mmu_notifier_invalidate_range_start(&range);
1745 /*

--- 1365 unchanged lines hidden ---

3111 }
3112
3113 if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
3114 goto oom_free_new;
3115 cgroup_throttle_swaprate(new_page, GFP_KERNEL);
3116
3117 __SetPageUptodate(new_page);
3118
3119 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
3094 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
3120 vmf->address & PAGE_MASK,
3121 (vmf->address & PAGE_MASK) + PAGE_SIZE);
3122 mmu_notifier_invalidate_range_start(&range);
3123
3124 /*
3125 * Re-check the pte - we dropped the lock
3126 */
3127 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);

--- 453 unchanged lines hidden ---

3581static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3582{
3583 struct folio *folio = page_folio(vmf->page);
3584 struct vm_area_struct *vma = vmf->vma;
3585 struct mmu_notifier_range range;
3586
3587 if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
3588 return VM_FAULT_RETRY;
3589 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
3564 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
3590 vma->vm_mm, vmf->address & PAGE_MASK,
3591 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3592 mmu_notifier_invalidate_range_start(&range);
3593
3594 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3595 &vmf->ptl);
3596 if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3597 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);

--- 26 unchanged lines hidden ---

3624
3625static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
3626{
3627 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3628 vmf->address, &vmf->ptl);
3629 /*
3630 * Be careful so that we will only recover a special uffd-wp pte into a
3631 * none pte. Otherwise it means the pte could have changed, so retry.
3607 *
3608 * This should also cover the case where e.g. the pte changed
3609 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_SWAPIN_ERROR.
3610 * So is_pte_marker() check is not enough to safely drop the pte.
3632 */
3633 if (is_pte_marker(*vmf->pte))
3612 if (pte_same(vmf->orig_pte, *vmf->pte))
3634 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
3635 pte_unmap_unlock(vmf->pte, vmf->ptl);
3636 return 0;
3637}
3638
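The new comment and test above make pte_marker_clear() stricter: after re-taking the PTL it clears the entry only if the PTE is still bit-for-bit the marker recorded at fault time, because a marker can change type while the lock was dropped (the comment's example is uffd-wp becoming a swapin-error marker) and is_pte_marker() alone cannot see that. The same recheck-under-lock idiom as a small hypothetical helper:

/* Hypothetical helper: pte_same() against the fault-time snapshot is stricter
 * than is_pte_marker() on the current value. */
static bool sketch_clear_if_unchanged(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep, pte_t snapshot)
{
	if (!pte_same(snapshot, *ptep))
		return false;	/* changed under us, let the fault retry */
	pte_clear(mm, addr, ptep);
	return true;
}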
3639/*
3640 * This is actually a page-missing access, but with uffd-wp special pte
3641 * installed. It means this pte was wr-protected before being unmapped.

--- 303 unchanged lines hidden ---

3945 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3946 vmf->flags &= ~FAULT_FLAG_WRITE;
3947 }
3948 rmap_flags |= RMAP_EXCLUSIVE;
3949 }
3950 flush_icache_page(vma, page);
3951 if (pte_swp_soft_dirty(vmf->orig_pte))
3952 pte = pte_mksoft_dirty(pte);
3953 if (pte_swp_uffd_wp(vmf->orig_pte)) {
3932 if (pte_swp_uffd_wp(vmf->orig_pte))
3954 pte = pte_mkuffd_wp(pte);
3933 pte = pte_mkuffd_wp(pte);
3955 pte = pte_wrprotect(pte);
3956 }
3957 vmf->orig_pte = pte;
3958
3959 /* ksm created a completely new copy */
3960 if (unlikely(folio != swapcache && swapcache)) {
3961 page_add_new_anon_rmap(page, vma, vmf->address);
3962 folio_add_lru_vma(folio, vma);
3963 } else {
3964 page_add_anon_rmap(page, vma, vmf->address, rmap_flags);

--- 326 unchanged lines hidden ---

4291 if (prefault && arch_wants_old_prefaulted_pte())
4292 entry = pte_mkold(entry);
4293 else
4294 entry = pte_sw_mkyoung(entry);
4295
4296 if (write)
4297 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
4298 if (unlikely(uffd_wp))
4299 entry = pte_mkuffd_wp(pte_wrprotect(entry));
4276 entry = pte_mkuffd_wp(entry);
4300 /* copy-on-write page */
4301 if (write && !(vma->vm_flags & VM_SHARED)) {
4302 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
4303 page_add_new_anon_rmap(page, vma, addr);
4304 lru_cache_add_inactive_or_unevictable(page, vma);
4305 } else {
4306 inc_mm_counter(vma->vm_mm, mm_counter_file(page));
4307 page_add_file_rmap(page, vma, false);

--- 824 unchanged lines hidden ---

5132 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
5133 else
5134 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
5135}
5136
5137#ifdef CONFIG_LRU_GEN
5138static void lru_gen_enter_fault(struct vm_area_struct *vma)
5139{
5140 /* the LRU algorithm doesn't apply to sequential or random reads */
5141 current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ));
5117 /* the LRU algorithm only applies to accesses with recency */
5118 current->in_lru_fault = vma_has_recency(vma);
5142}
5143
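lru_gen_enter_fault() above now derives in_lru_fault from the same recency notion used in the zap path earlier in this diff, rather than from the readahead flags alone. For context, a sketch of how the enter/exit pair brackets fault handling; the exact call site is not part of this diff, so treat the placement and the __handle_mm_fault() call as an assumption.

/* Assumed pairing around the real fault work (illustrative only). */
static vm_fault_t sketch_fault_with_lru_gen(struct vm_area_struct *vma,
					    unsigned long address,
					    unsigned int flags)
{
	vm_fault_t ret;

	lru_gen_enter_fault(vma);
	ret = __handle_mm_fault(vma, address, flags);
	lru_gen_exit_fault();

	return ret;
}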
5144static void lru_gen_exit_fault(void)
5145{
5146 current->in_lru_fault = false;
5147}
5148#else
5149static void lru_gen_enter_fault(struct vm_area_struct *vma)

--- 705 unchanged lines hidden ---