xref: /openbmc/linux/arch/arm64/mm/pageattr.c (revision c845428b7a9157523103100806bc8130d64769c8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/kfence.h>

struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};
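
/*
 * Example (illustrative): a permission change is expressed as a pair of
 * masks, one of bits to set and one of bits to clear. Write-protecting
 * pages, as set_memory_ro() below does, pairs them like this:
 *
 *	struct page_change_data data = {
 *		.set_mask   = __pgprot(PTE_RDONLY),
 *		.clear_mask = __pgprot(PTE_WRITE),
 *	};
 */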

/*
 * rodata_full defaults to CONFIG_RODATA_FULL_DEFAULT_ENABLED and can be
 * overridden with the rodata=[off|on|full] kernel command line option.
 */
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

bool can_set_direct_map(void)
{
	/*
	 * rodata_full and DEBUG_PAGEALLOC require the linear map to be
	 * mapped at page granularity, so that it is possible to
	 * protect/unprotect single pages.
	 *
	 * The KFENCE pool requires page-granular mapping if it is
	 * initialized late.
	 */
	return rodata_full || debug_pagealloc_enabled() ||
	       arm64_kfence_can_set_direct_map();
}

static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}
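
/*
 * apply_to_page_range() invokes this callback once per PTE in the range.
 * For example (illustrative), invalidating a page boils down to per-PTE
 * calls with:
 *
 *	struct page_change_data data = {
 *		.set_mask   = __pgprot(0),
 *		.clear_mask = __pgprot(PTE_VALID),
 *	};
 *
 * which is exactly how set_direct_map_invalid_noflush() below uses it.
 */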

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}
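
/*
 * Illustrative sketch: invalidating a single page through this helper,
 * as set_memory_valid() below does on behalf of DEBUG_PAGEALLOC:
 *
 *	__change_memory_common(addr, PAGE_SIZE,
 *			       __pgprot(0), __pgprot(PTE_VALID));
 */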

static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}
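
/*
 * Note (illustrative): because of the find_vm_area() check above, the
 * set_memory_*() helpers only accept ranges backed by a single VM_ALLOC
 * area. Passing, say, a linear map address fails:
 *
 *	void *p = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *
 *	set_memory_ro((unsigned long)p, 1);	// returns -EINVAL
 */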

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(PTE_MAYBE_GP));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_MAYBE_GP),
					__pgprot(PTE_PXN));
}
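
/*
 * Usage sketch (illustrative, not part of this file): write-protecting
 * a vmalloc()'ed buffer and making it writable again later.
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	set_memory_ro((unsigned long)buf, 1);
 *	...
 *	set_memory_rw((unsigned long)buf, 1);
 *	vfree(buf);
 */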

int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}
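
/*
 * Note: as the _noflush suffix indicates, neither helper above flushes
 * the TLB; callers are expected to issue any required TLB maintenance
 * themselves, typically once a whole batch of pages has been updated.
 */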

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!can_set_direct_map())
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return false;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
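
/*
 * Note (illustrative): kernel_page_present() lets generic code such as
 * hibernation decide whether a page can be safely accessed through the
 * linear map before touching it, e.g.:
 *
 *	if (kernel_page_present(page))
 *		memcpy(dst, page_address(page), PAGE_SIZE);
 */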