xref: /openbmc/linux/arch/riscv/mm/pageattr.c (revision ca2478a7d974f38d29d27acb42a952c7f168916e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>

struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

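/*
 * Apply the set/clear masks passed in via walk->private to a raw page table
 * entry value: the bits in clear_mask are removed first, then the bits in
 * set_mask are added.
 */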
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

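/*
 * Page table walker callbacks: the p4d/pud/pmd callbacks only rewrite leaf
 * entries at their level and leave non-leaf entries untouched so the walk
 * descends further, while the pte callback applies the masks unconditionally.
 */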
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = p4dp_get(p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = pudp_get(pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = pmdp_get(pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = ptep_get(pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}

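/*
 * Ops used to rewrite the protection bits of existing entries in place:
 * holes are skipped and the walk itself allocates no page tables.
 */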
static const struct mm_walk_ops pageattr_ops = {
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
	.walk_lock = PGWALK_RDLOCK,
};

#ifdef CONFIG_64BIT
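/*
 * Helpers to split huge leaf mappings of the linear map down to the next
 * level when only part of a huge mapping is affected: a new lower-level
 * table is allocated, filled with leaf entries that preserve the original
 * protections, and then installed in place of the huge leaf entry.
 */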
static int __split_linear_mapping_pmd(pud_t *pudp,
				      unsigned long vaddr, unsigned long end)
{
	pmd_t *pmdp;
	unsigned long next;

	pmdp = pmd_offset(pudp, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (next - vaddr >= PMD_SIZE &&
		    vaddr <= (vaddr & PMD_MASK) && end >= next)
			continue;

		if (pmd_leaf(pmdp_get(pmdp))) {
			struct page *pte_page;
			unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
			pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
			pte_t *ptep_new;
			int i;

			pte_page = alloc_page(GFP_KERNEL);
			if (!pte_page)
				return -ENOMEM;

			ptep_new = (pte_t *)page_address(pte_page);
			for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
				set_pte(ptep_new, pfn_pte(pfn + i, prot));

			smp_wmb();

			set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE));
		}
	} while (pmdp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_pud(p4d_t *p4dp,
				      unsigned long vaddr, unsigned long end)
{
	pud_t *pudp;
	unsigned long next;
	int ret;

	pudp = pud_offset(p4dp, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (next - vaddr >= PUD_SIZE &&
		    vaddr <= (vaddr & PUD_MASK) && end >= next)
			continue;

		if (pud_leaf(pudp_get(pudp))) {
			struct page *pmd_page;
			unsigned long pfn = _pud_pfn(pudp_get(pudp));
			pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
			pmd_t *pmdp_new;
			int i;

			pmd_page = alloc_page(GFP_KERNEL);
			if (!pmd_page)
				return -ENOMEM;

			pmdp_new = (pmd_t *)page_address(pmd_page);
			for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new)
				set_pmd(pmdp_new,
					pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));

			smp_wmb();

			set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE));
		}

		ret = __split_linear_mapping_pmd(pudp, vaddr, next);
		if (ret)
			return ret;
	} while (pudp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_p4d(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	p4d_t *p4dp;
	unsigned long next;
	int ret;

	p4dp = p4d_offset(pgdp, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		/*
		 * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't
		 * need to split, we'll change the protections on the whole P4D.
		 */
		if (next - vaddr >= P4D_SIZE &&
		    vaddr <= (vaddr & P4D_MASK) && end >= next)
			continue;

		if (p4d_leaf(p4dp_get(p4dp))) {
			struct page *pud_page;
			unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
			pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
			pud_t *pudp_new;
			int i;

			pud_page = alloc_page(GFP_KERNEL);
			if (!pud_page)
				return -ENOMEM;

			/*
			 * Fill the pud level with leaf puds that have the same
			 * protections as the leaf p4d.
			 */
			pudp_new = (pud_t *)page_address(pud_page);
			for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
				set_pud(pudp_new,
					pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));

			/*
			 * Make sure the pud filling is not reordered with the
			 * p4d store which could result in seeing a partially
			 * filled pud level.
			 */
			smp_wmb();

			set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE));
		}

		ret = __split_linear_mapping_pud(p4dp, vaddr, next);
		if (ret)
			return ret;
	} while (p4dp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_pgd(pgd_t *pgdp,
				      unsigned long vaddr,
				      unsigned long end)
{
	unsigned long next;
	int ret;

	do {
		next = pgd_addr_end(vaddr, end);
		/* We never use PGD mappings for the linear mapping */
		ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
		if (ret)
			return ret;
	} while (pgdp++, vaddr = next, vaddr != end);

	return 0;
}

static int split_linear_mapping(unsigned long start, unsigned long end)
{
	return __split_linear_mapping_pgd(pgd_offset_k(start), start, end);
}
#endif	/* CONFIG_64BIT */

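/*
 * Change the protections of [addr, addr + numpages * PAGE_SIZE): bits in
 * clear_mask are removed and bits in set_mask are added.  On 64-bit the
 * linear mapping alias of the range gets the same treatment, which may
 * require splitting huge mappings first, so the whole operation runs under
 * the init_mm mmap lock and ends with a TLB flush.
 */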
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	unsigned long __maybe_unused lm_start;
	unsigned long __maybe_unused lm_end;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	mmap_write_lock(&init_mm);

#ifdef CONFIG_64BIT
	/*
	 * We are about to change the permissions of a kernel mapping, so we
	 * must apply the same changes to its linear mapping alias, which may
	 * imply splitting a huge mapping.
	 */

	if (is_vmalloc_or_module_addr((void *)start)) {
		struct vm_struct *area = NULL;
		int i, page_start;

		area = find_vm_area((void *)start);
		page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT;

		for (i = page_start; i < page_start + numpages; ++i) {
			lm_start = (unsigned long)page_address(area->pages[i]);
			lm_end = lm_start + PAGE_SIZE;

			ret = split_linear_mapping(lm_start, lm_end);
			if (ret)
				goto unlock;

			ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
						    &pageattr_ops, NULL, &masks);
			if (ret)
				goto unlock;
		}
	} else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
		if (is_kernel_mapping(start)) {
			lm_start = (unsigned long)lm_alias(start);
			lm_end = (unsigned long)lm_alias(end);
		} else {
			lm_start = start;
			lm_end = end;
		}

		ret = split_linear_mapping(lm_start, lm_end);
		if (ret)
			goto unlock;

		ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
					    &pageattr_ops, NULL, &masks);
		if (ret)
			goto unlock;
	}

	ret =  walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				     &masks);

unlock:
	mmap_write_unlock(&init_mm);

	/*
	 * We can't use flush_tlb_kernel_range() here, as we may have split a
	 * hugepage that extends beyond the range being changed, so let's
	 * flush everything.
	 */
	flush_tlb_all();
#else
	ret =  walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				     &masks);

	mmap_write_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);
#endif

	return ret;
}

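/*
 * The set_memory_*() interface: thin wrappers around __set_memory() with a
 * fixed pair of set/clear masks.  Illustrative use (hypothetical caller,
 * not part of this file), making one vmalloc'ed page read-only and then
 * writable again:
 *
 *	buf = vmalloc(PAGE_SIZE);
 *	set_memory_ro((unsigned long)buf, 1);
 *	set_memory_rw((unsigned long)buf, 1);
 */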
int set_memory_rw_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(_PAGE_EXEC));
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

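/*
 * Helpers for taking a page out of the direct map (clear _PAGE_PRESENT) and
 * for putting it back with the default kernel protections.
 */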
int set_direct_map_invalid_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_address(page), 1,
			    __pgprot(0), __pgprot(_PAGE_PRESENT));
}

int set_direct_map_default_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_address(page), 1,
			    PAGE_KERNEL, __pgprot(_PAGE_EXEC));
}

#ifdef CONFIG_DEBUG_PAGEALLOC
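/*
 * CONFIG_DEBUG_PAGEALLOC: the page allocator unmaps pages as they are freed
 * and maps them again on allocation by toggling _PAGE_PRESENT on their
 * kernel PTEs, so that use-after-free accesses fault immediately.
 */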
static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
{
	int enable = *(int *)data;

	unsigned long val = pte_val(ptep_get(pte));

	if (enable)
		val |= _PAGE_PRESENT;
	else
		val &= ~_PAGE_PRESENT;

	set_pte(pte, __pte(val));

	return 0;
}

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	unsigned long start = (unsigned long)page_address(page);
	unsigned long size = PAGE_SIZE * numpages;

	apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable);

	flush_tlb_kernel_range(start, start + size);
}
#endif

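/*
 * Report whether @page is currently mapped in the kernel page tables,
 * stopping at the first leaf entry found on the way down.
 */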
bool kernel_page_present(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgd_t *pgd;
	pud_t *pud;
	p4d_t *p4d;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (!pgd_present(pgdp_get(pgd)))
		return false;
	if (pgd_leaf(pgdp_get(pgd)))
		return true;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(p4dp_get(p4d)))
		return false;
	if (p4d_leaf(p4dp_get(p4d)))
		return true;

	pud = pud_offset(p4d, addr);
	if (!pud_present(pudp_get(pud)))
		return false;
	if (pud_leaf(pudp_get(pud)))
		return true;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(pmdp_get(pmd)))
		return false;
	if (pmd_leaf(pmdp_get(pmd)))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_present(ptep_get(pte));
}