hugetlb.c: diff between commits 714c189108244f1df579689061db1d785d92e7e2 (old) and f619147104c8ea71e120e4936d2b68ec11a1e527 (new). Removed lines are prefixed with '-', added lines with '+'; unchanged context has no prefix.
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>

--- 25 unchanged lines hidden ---

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
-#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA

--- 4809 unchanged lines hidden ---

 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
 * modifications for huge pages.
 */
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                pte_t *dst_pte,
                struct vm_area_struct *dst_vma,
                unsigned long dst_addr,
                unsigned long src_addr,
+               enum mcopy_atomic_mode mode,
                struct page **pagep)
{
+       bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
        struct address_space *mapping;
        pgoff_t idx;
        unsigned long size;
        int vm_shared = dst_vma->vm_flags & VM_SHARED;
        struct hstate *h = hstate_vma(dst_vma);
        pte_t _dst_pte;
        spinlock_t *ptl;
        int ret;
        struct page *page;
+       int writable;

-       if (!*pagep) {
+       mapping = dst_vma->vm_file->f_mapping;
+       idx = vma_hugecache_offset(h, dst_vma, dst_addr);
+
+       if (is_continue) {
+               ret = -EFAULT;
+               page = find_lock_page(mapping, idx);
+               if (!page)
+                       goto out;
+       } else if (!*pagep) {
                ret = -ENOMEM;
                page = alloc_huge_page(dst_vma, dst_addr, 0);
                if (IS_ERR(page))
                        goto out;

                ret = copy_huge_page_from_user(page,
                                (const void __user *) src_addr,
                                pages_per_huge_page(h), false);

--- 12 unchanged lines hidden ---

        /*
         * The memory barrier inside __SetPageUptodate makes sure that
         * preceding stores to the page contents become visible before
         * the set_pte_at() write.
         */
        __SetPageUptodate(page);

-       mapping = dst_vma->vm_file->f_mapping;
-       idx = vma_hugecache_offset(h, dst_vma, dst_addr);
-
-       /*
-        * If shared, add to page cache
-        */
-       if (vm_shared) {
+       /* Add shared, newly allocated pages to the page cache. */
+       if (vm_shared && !is_continue) {
                size = i_size_read(mapping->host) >> huge_page_shift(h);
                ret = -EFAULT;
                if (idx >= size)
                        goto out_release_nounlock;

                /*
                 * Serialization between remove_inode_hugepages() and
                 * huge_add_to_page_cache() below happens through the

--- 28 unchanged lines hidden ---

        if (vm_shared) {
                page_dup_rmap(page, true);
        } else {
                ClearHPageRestoreReserve(page);
                hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
        }

-       _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
-       if (dst_vma->vm_flags & VM_WRITE)
+       /* For CONTINUE on a non-shared VMA, don't set VM_WRITE for CoW. */
+       if (is_continue && !vm_shared)
+               writable = 0;
+       else
+               writable = dst_vma->vm_flags & VM_WRITE;
+
+       _dst_pte = make_huge_pte(dst_vma, page, writable);
+       if (writable)
                _dst_pte = huge_pte_mkdirty(_dst_pte);
        _dst_pte = pte_mkyoung(_dst_pte);

        set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

        (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
                        dst_vma->vm_flags & VM_WRITE);
        hugetlb_count_add(pages_per_huge_page(h), dst_mm);

        /* No need to invalidate - it was non-present before */
        update_mmu_cache(dst_vma, dst_addr, dst_pte);

        spin_unlock(ptl);
-       SetHPageMigratable(page);
-       if (vm_shared)
+       if (!is_continue)
+               SetHPageMigratable(page);
+       if (vm_shared || is_continue)
                unlock_page(page);
        ret = 0;
out:
        return ret;
out_release_unlock:
        spin_unlock(ptl);
-       if (vm_shared)
+       if (vm_shared || is_continue)
                unlock_page(page);
out_release_nounlock:
        put_page(page);
        goto out;
}
#endif /* CONFIG_USERFAULTFD */

static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,

--- 1015 unchanged lines hidden ---
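
For context on what the new MCOPY_ATOMIC_CONTINUE path above is for, here is a minimal userspace sketch of the minor-fault handling loop that ends in UFFDIO_CONTINUE. The UAPI names (UFFDIO_CONTINUE, UFFDIO_REGISTER_MODE_MINOR, UFFD_FEATURE_MINOR_HUGETLBFS, UFFD_PAGEFAULT_FLAG_MINOR, struct uffdio_continue) come from the userfaultfd interface introduced alongside this change, not from this file, and handle_minor_faults()/primary/shadow/hpage_size are illustrative names invented here; treat this as a sketch, not a reference implementation, with error handling and setup trimmed.

/*
 * Userspace sketch (not part of hugetlb.c): fault-handler loop for a
 * hugetlbfs range registered with UFFDIO_REGISTER_MODE_MINOR.
 * 'primary' is the registered mapping, 'shadow' is a second,
 * unregistered mapping of the same hugetlbfs file, 'hpage_size' is
 * the huge page size.
 */
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int handle_minor_faults(int uffd, char *primary, char *shadow,
                size_t hpage_size)
{
        struct uffd_msg msg;

        for (;;) {
                struct pollfd pfd = { .fd = uffd, .events = POLLIN };

                if (poll(&pfd, 1, -1) < 0)
                        return -1;
                if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
                        return -1;
                if (msg.event != UFFD_EVENT_PAGEFAULT ||
                    !(msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR))
                        continue;

                /* Round the faulting address down to a huge page boundary. */
                unsigned long addr = msg.arg.pagefault.address &
                                ~((unsigned long)hpage_size - 1);

                /*
                 * Populate the page cache page through the unregistered
                 * mapping; this is the page the kernel's find_lock_page()
                 * call above will find.
                 */
                memset(shadow + (addr - (unsigned long)primary), 0, hpage_size);

                /*
                 * UFFDIO_CONTINUE resolves the minor fault by mapping the
                 * existing page cache page; in the kernel this reaches
                 * hugetlb_mcopy_atomic_pte() with MCOPY_ATOMIC_CONTINUE.
                 */
                struct uffdio_continue cont = {
                        .range = { .start = addr, .len = hpage_size },
                        .mode  = 0,
                };
                if (ioctl(uffd, UFFDIO_CONTINUE, &cont) < 0)
                        return -1;
        }
}

Before such a loop runs, the process would typically mmap() the same hugetlbfs file twice (the registered "primary" range and the unregistered "shadow" used to fill the page cache) and complete the UFFDIO_API handshake requesting UFFD_FEATURE_MINOR_HUGETLBFS. The is_continue branch added in this diff then maps the already-present page cache page instead of allocating and copying a fresh huge page, which is the point of the CONTINUE mode.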