xref: /openbmc/linux/arch/s390/mm/vmem.c (revision 907835e6)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2f4eb07c1SHeiko Carstens /*
3f4eb07c1SHeiko Carstens  *    Copyright IBM Corp. 2006
4f4eb07c1SHeiko Carstens  */
5f4eb07c1SHeiko Carstens 
67707248aSAnshuman Khandual #include <linux/memory_hotplug.h>
757c8a661SMike Rapoport #include <linux/memblock.h>
8f4eb07c1SHeiko Carstens #include <linux/pfn.h>
9f4eb07c1SHeiko Carstens #include <linux/mm.h>
10ff24b07aSPaul Gortmaker #include <linux/init.h>
11f4eb07c1SHeiko Carstens #include <linux/list.h>
1253492b1dSGerald Schaefer #include <linux/hugetlb.h>
135a0e3ad6STejun Heo #include <linux/slab.h>
14bb1520d5SAlexander Gordeev #include <linux/sort.h>
158847617eSHeiko Carstens #include <asm/page-states.h>
16bab247ffSHeiko Carstens #include <asm/cacheflush.h>
173b051e89SSven Schnelle #include <asm/nospec-branch.h>
18f4eb07c1SHeiko Carstens #include <asm/pgalloc.h>
19f4eb07c1SHeiko Carstens #include <asm/setup.h>
20f4eb07c1SHeiko Carstens #include <asm/tlbflush.h>
2153492b1dSGerald Schaefer #include <asm/sections.h>
22e6c7c630SLaura Abbott #include <asm/set_memory.h>
23f4eb07c1SHeiko Carstens 
24f4eb07c1SHeiko Carstens static DEFINE_MUTEX(vmem_mutex);
25f4eb07c1SHeiko Carstens 
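/*
 * Allocate pages for page tables: from the buddy allocator once slab is
 * available, from memblock during early boot. memblock_alloc(size, size)
 * keeps the early allocations naturally aligned.
 */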
2667060d9cSHeiko Carstens static void __ref *vmem_alloc_pages(unsigned int order)
2767060d9cSHeiko Carstens {
282e9996fcSHeiko Carstens 	unsigned long size = PAGE_SIZE << order;
292e9996fcSHeiko Carstens 
3067060d9cSHeiko Carstens 	if (slab_is_available())
3167060d9cSHeiko Carstens 		return (void *)__get_free_pages(GFP_KERNEL, order);
324c86d2f5SAlexander Gordeev 	return memblock_alloc(size, size);
3367060d9cSHeiko Carstens }
3467060d9cSHeiko Carstens 
359ec8fa8dSDavid Hildenbrand static void vmem_free_pages(unsigned long addr, int order)
369ec8fa8dSDavid Hildenbrand {
379ec8fa8dSDavid Hildenbrand 	/* We don't expect boot memory to be removed ever. */
389ec8fa8dSDavid Hildenbrand 	if (!slab_is_available() ||
392d1494fbSLinus Walleij 	    WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
409ec8fa8dSDavid Hildenbrand 		return;
419ec8fa8dSDavid Hildenbrand 	free_pages(addr, order);
429ec8fa8dSDavid Hildenbrand }
439ec8fa8dSDavid Hildenbrand 
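/*
 * Allocate a CRST (region or segment) table and initialize all of its
 * entries to @val; returns NULL if no memory is available.
 */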
44a01ef308SHeiko Carstens void *vmem_crst_alloc(unsigned long val)
451aea9b3fSMartin Schwidefsky {
46a01ef308SHeiko Carstens 	unsigned long *table;
471aea9b3fSMartin Schwidefsky 
48a01ef308SHeiko Carstens 	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
498847617eSHeiko Carstens 	if (!table)
508847617eSHeiko Carstens 		return NULL;
51a01ef308SHeiko Carstens 	crst_table_init(table, val);
528847617eSHeiko Carstens 	if (slab_is_available())
538847617eSHeiko Carstens 		arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
54a01ef308SHeiko Carstens 	return table;
55f4eb07c1SHeiko Carstens }
56f4eb07c1SHeiko Carstens 
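/* Allocate a page table with all entries preset to _PAGE_INVALID. */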
57e8a97e42SHeiko Carstens pte_t __ref *vmem_pte_alloc(void)
58f4eb07c1SHeiko Carstens {
599e427365SHeiko Carstens 	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
60146e4b3cSMartin Schwidefsky 	pte_t *pte;
61f4eb07c1SHeiko Carstens 
62146e4b3cSMartin Schwidefsky 	if (slab_is_available())
63527e30b4SMartin Schwidefsky 		pte = (pte_t *) page_table_alloc(&init_mm);
64146e4b3cSMartin Schwidefsky 	else
654c86d2f5SAlexander Gordeev 		pte = (pte_t *) memblock_alloc(size, size);
66f4eb07c1SHeiko Carstens 	if (!pte)
67f4eb07c1SHeiko Carstens 		return NULL;
6841879ff6SHeiko Carstens 	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
69f4eb07c1SHeiko Carstens 	return pte;
70f4eb07c1SHeiko Carstens }
71f4eb07c1SHeiko Carstens 
72b9ff8100SDavid Hildenbrand static void vmem_pte_free(unsigned long *table)
73b9ff8100SDavid Hildenbrand {
74b9ff8100SDavid Hildenbrand 	/* We don't expect boot memory to be removed ever. */
75b9ff8100SDavid Hildenbrand 	if (!slab_is_available() ||
76b9ff8100SDavid Hildenbrand 	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
77b9ff8100SDavid Hildenbrand 		return;
78b9ff8100SDavid Hildenbrand 	page_table_free(&init_mm, table);
79b9ff8100SDavid Hildenbrand }
80b9ff8100SDavid Hildenbrand 
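/*
 * vmemmap pages backing a huge (PMD) mapping are tracked at sub-PMD
 * granularity: ranges not used by any memory section are filled with the
 * PAGE_UNUSED pattern, so a completely unused PMD can be detected (via
 * memchr_inv()) and freed again.
 */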
81cd5781d6SDavid Hildenbrand #define PAGE_UNUSED 0xFD
82cd5781d6SDavid Hildenbrand 
832c114df0SDavid Hildenbrand /*
842c114df0SDavid Hildenbrand  * The unused vmemmap range, which has not yet been memset(PAGE_UNUSED),
8512bb4c68SAlexander Gordeev  * extends from unused_sub_pmd_start to the next PMD_SIZE boundary.
862c114df0SDavid Hildenbrand  */
8712bb4c68SAlexander Gordeev static unsigned long unused_sub_pmd_start;
882c114df0SDavid Hildenbrand 
8912bb4c68SAlexander Gordeev static void vmemmap_flush_unused_sub_pmd(void)
902c114df0SDavid Hildenbrand {
9112bb4c68SAlexander Gordeev 	if (!unused_sub_pmd_start)
922c114df0SDavid Hildenbrand 		return;
934c86d2f5SAlexander Gordeev 	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
9412bb4c68SAlexander Gordeev 	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
9512bb4c68SAlexander Gordeev 	unused_sub_pmd_start = 0;
962c114df0SDavid Hildenbrand }
972c114df0SDavid Hildenbrand 
9812bb4c68SAlexander Gordeev static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
99cd5781d6SDavid Hildenbrand {
100cd5781d6SDavid Hildenbrand 	/*
101cd5781d6SDavid Hildenbrand 	 * As we expect to add memory in the same granularity as we remove it,
102cd5781d6SDavid Hildenbrand 	 * it is sufficient to mark only a piece of the memmap page as used to
103cd5781d6SDavid Hildenbrand 	 * keep it from being removed (just in case the memmap never gets
104cd5781d6SDavid Hildenbrand 	 * initialized, e.g., because the memory block never gets onlined).
105cd5781d6SDavid Hildenbrand 	 */
1064c86d2f5SAlexander Gordeev 	memset((void *)start, 0, sizeof(struct page));
107cd5781d6SDavid Hildenbrand }
108cd5781d6SDavid Hildenbrand 
1092c114df0SDavid Hildenbrand static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
1102c114df0SDavid Hildenbrand {
1112c114df0SDavid Hildenbrand 	/*
1122c114df0SDavid Hildenbrand 	 * We only optimize if the new used range directly follows the
1132c114df0SDavid Hildenbrand 	 * previously unused range (esp., when populating consecutive sections).
1142c114df0SDavid Hildenbrand 	 */
11512bb4c68SAlexander Gordeev 	if (unused_sub_pmd_start == start) {
11612bb4c68SAlexander Gordeev 		unused_sub_pmd_start = end;
11712bb4c68SAlexander Gordeev 		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
11812bb4c68SAlexander Gordeev 			unused_sub_pmd_start = 0;
1192c114df0SDavid Hildenbrand 		return;
1202c114df0SDavid Hildenbrand 	}
12112bb4c68SAlexander Gordeev 	vmemmap_flush_unused_sub_pmd();
12212bb4c68SAlexander Gordeev 	vmemmap_mark_sub_pmd_used(start, end);
1232c114df0SDavid Hildenbrand }
1242c114df0SDavid Hildenbrand 
125cd5781d6SDavid Hildenbrand static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
126cd5781d6SDavid Hildenbrand {
1274c86d2f5SAlexander Gordeev 	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
128cd5781d6SDavid Hildenbrand 
12912bb4c68SAlexander Gordeev 	vmemmap_flush_unused_sub_pmd();
1302c114df0SDavid Hildenbrand 
131cd5781d6SDavid Hildenbrand 	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
13212bb4c68SAlexander Gordeev 	vmemmap_mark_sub_pmd_used(start, end);
133cd5781d6SDavid Hildenbrand 
134cd5781d6SDavid Hildenbrand 	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
135cd5781d6SDavid Hildenbrand 	if (!IS_ALIGNED(start, PMD_SIZE))
1364c86d2f5SAlexander Gordeev 		memset((void *)page, PAGE_UNUSED, start - page);
1372c114df0SDavid Hildenbrand 	/*
1382c114df0SDavid Hildenbrand 	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
1392c114df0SDavid Hildenbrand 	 * consecutive sections. For the last added PMD, remember the
1402c114df0SDavid Hildenbrand 	 * unused range that remains in the populated PMD.
1412c114df0SDavid Hildenbrand 	 */
142cd5781d6SDavid Hildenbrand 	if (!IS_ALIGNED(end, PMD_SIZE))
14312bb4c68SAlexander Gordeev 		unused_sub_pmd_start = end;
144cd5781d6SDavid Hildenbrand }
145cd5781d6SDavid Hildenbrand 
146cd5781d6SDavid Hildenbrand /* Returns true if the PMD is completely unused and can be freed. */
147cd5781d6SDavid Hildenbrand static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
148cd5781d6SDavid Hildenbrand {
1494c86d2f5SAlexander Gordeev 	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
150cd5781d6SDavid Hildenbrand 
15112bb4c68SAlexander Gordeev 	vmemmap_flush_unused_sub_pmd();
1524c86d2f5SAlexander Gordeev 	memset((void *)start, PAGE_UNUSED, end - start);
1534c86d2f5SAlexander Gordeev 	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
154cd5781d6SDavid Hildenbrand }
155cd5781d6SDavid Hildenbrand 
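/*
 * Add (add=true) or remove 4K mappings for [addr, end). For the identity
 * mapping (direct=true) the PTEs map virtual addresses 1:1 to their physical
 * counterparts; for the vmemmap the backing pages are allocated and freed
 * here as well. The PG_DIRECT_MAP_4K counter is only updated for the
 * identity mapping.
 */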
1569ec8fa8dSDavid Hildenbrand /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
1579ec8fa8dSDavid Hildenbrand static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
1589ec8fa8dSDavid Hildenbrand 				  unsigned long end, bool add, bool direct)
1593e0d3e40SDavid Hildenbrand {
1603e0d3e40SDavid Hildenbrand 	unsigned long prot, pages = 0;
1619ec8fa8dSDavid Hildenbrand 	int ret = -ENOMEM;
1623e0d3e40SDavid Hildenbrand 	pte_t *pte;
1633e0d3e40SDavid Hildenbrand 
1643e0d3e40SDavid Hildenbrand 	prot = pgprot_val(PAGE_KERNEL);
1653e0d3e40SDavid Hildenbrand 	if (!MACHINE_HAS_NX)
1663e0d3e40SDavid Hildenbrand 		prot &= ~_PAGE_NOEXEC;
1673e0d3e40SDavid Hildenbrand 
1683e0d3e40SDavid Hildenbrand 	pte = pte_offset_kernel(pmd, addr);
1693e0d3e40SDavid Hildenbrand 	for (; addr < end; addr += PAGE_SIZE, pte++) {
1703e0d3e40SDavid Hildenbrand 		if (!add) {
1713e0d3e40SDavid Hildenbrand 			if (pte_none(*pte))
1723e0d3e40SDavid Hildenbrand 				continue;
1739ec8fa8dSDavid Hildenbrand 			if (!direct)
1744c86d2f5SAlexander Gordeev 				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
1753e0d3e40SDavid Hildenbrand 			pte_clear(&init_mm, addr, pte);
1763e0d3e40SDavid Hildenbrand 		} else if (pte_none(*pte)) {
1779ec8fa8dSDavid Hildenbrand 			if (!direct) {
1789a996c67SHeiko Carstens 				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
1799ec8fa8dSDavid Hildenbrand 
1809ec8fa8dSDavid Hildenbrand 				if (!new_page)
1819ec8fa8dSDavid Hildenbrand 					goto out;
182b8e3b379SHeiko Carstens 				set_pte(pte, __pte(__pa(new_page) | prot));
1839a996c67SHeiko Carstens 			} else {
184b8e3b379SHeiko Carstens 				set_pte(pte, __pte(__pa(addr) | prot));
1859a996c67SHeiko Carstens 			}
1869a996c67SHeiko Carstens 		} else {
1873e0d3e40SDavid Hildenbrand 			continue;
1889a996c67SHeiko Carstens 		}
1893e0d3e40SDavid Hildenbrand 		pages++;
1903e0d3e40SDavid Hildenbrand 	}
1919ec8fa8dSDavid Hildenbrand 	ret = 0;
1929ec8fa8dSDavid Hildenbrand out:
1939ec8fa8dSDavid Hildenbrand 	if (direct)
1943e0d3e40SDavid Hildenbrand 		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
1959ec8fa8dSDavid Hildenbrand 	return ret;
1963e0d3e40SDavid Hildenbrand }
1973e0d3e40SDavid Hildenbrand 
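/* Free the PTE table referenced by @pmd and clear the PMD entry once all PTEs are empty. */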
198b9ff8100SDavid Hildenbrand static void try_free_pte_table(pmd_t *pmd, unsigned long start)
199b9ff8100SDavid Hildenbrand {
200b9ff8100SDavid Hildenbrand 	pte_t *pte;
201b9ff8100SDavid Hildenbrand 	int i;
202b9ff8100SDavid Hildenbrand 
203b9ff8100SDavid Hildenbrand 	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
204b9ff8100SDavid Hildenbrand 	pte = pte_offset_kernel(pmd, start);
2059a996c67SHeiko Carstens 	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
206b9ff8100SDavid Hildenbrand 		if (!pte_none(*pte))
207b9ff8100SDavid Hildenbrand 			return;
2089a996c67SHeiko Carstens 	}
2094c86d2f5SAlexander Gordeev 	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
210b9ff8100SDavid Hildenbrand 	pmd_clear(pmd);
211b9ff8100SDavid Hildenbrand }
212b9ff8100SDavid Hildenbrand 
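/*
 * Add or remove segment (PMD) level mappings for [addr, end). With EDAT1 the
 * identity map uses 1MB mappings for fully PMD-aligned chunks, and the
 * vmemmap uses 1MB frames even for partially used PMDs (tracked via
 * PAGE_UNUSED); everything else falls back to a PTE table via
 * modify_pte_table().
 */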
2139ec8fa8dSDavid Hildenbrand /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
2149ec8fa8dSDavid Hildenbrand static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
2159ec8fa8dSDavid Hildenbrand 				  unsigned long end, bool add, bool direct)
2163e0d3e40SDavid Hildenbrand {
2173e0d3e40SDavid Hildenbrand 	unsigned long next, prot, pages = 0;
2183e0d3e40SDavid Hildenbrand 	int ret = -ENOMEM;
2193e0d3e40SDavid Hildenbrand 	pmd_t *pmd;
2203e0d3e40SDavid Hildenbrand 	pte_t *pte;
2213e0d3e40SDavid Hildenbrand 
2223e0d3e40SDavid Hildenbrand 	prot = pgprot_val(SEGMENT_KERNEL);
2233e0d3e40SDavid Hildenbrand 	if (!MACHINE_HAS_NX)
2243e0d3e40SDavid Hildenbrand 		prot &= ~_SEGMENT_ENTRY_NOEXEC;
2253e0d3e40SDavid Hildenbrand 
2263e0d3e40SDavid Hildenbrand 	pmd = pmd_offset(pud, addr);
2273e0d3e40SDavid Hildenbrand 	for (; addr < end; addr = next, pmd++) {
2283e0d3e40SDavid Hildenbrand 		next = pmd_addr_end(addr, end);
2293e0d3e40SDavid Hildenbrand 		if (!add) {
2303e0d3e40SDavid Hildenbrand 			if (pmd_none(*pmd))
2313e0d3e40SDavid Hildenbrand 				continue;
232af71657cSAlexander Gordeev 			if (pmd_large(*pmd)) {
2333e0d3e40SDavid Hildenbrand 				if (IS_ALIGNED(addr, PMD_SIZE) &&
2343e0d3e40SDavid Hildenbrand 				    IS_ALIGNED(next, PMD_SIZE)) {
2359ec8fa8dSDavid Hildenbrand 					if (!direct)
2369a996c67SHeiko Carstens 						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
2373e0d3e40SDavid Hildenbrand 					pmd_clear(pmd);
2383e0d3e40SDavid Hildenbrand 					pages++;
2399a996c67SHeiko Carstens 				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
2409a996c67SHeiko Carstens 					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
241cd5781d6SDavid Hildenbrand 					pmd_clear(pmd);
2423e0d3e40SDavid Hildenbrand 				}
2433e0d3e40SDavid Hildenbrand 				continue;
2443e0d3e40SDavid Hildenbrand 			}
2453e0d3e40SDavid Hildenbrand 		} else if (pmd_none(*pmd)) {
2463e0d3e40SDavid Hildenbrand 			if (IS_ALIGNED(addr, PMD_SIZE) &&
2473e0d3e40SDavid Hildenbrand 			    IS_ALIGNED(next, PMD_SIZE) &&
248b193d2d4SHeiko Carstens 			    MACHINE_HAS_EDAT1 && direct &&
2493e0d3e40SDavid Hildenbrand 			    !debug_pagealloc_enabled()) {
250b8e3b379SHeiko Carstens 				set_pmd(pmd, __pmd(__pa(addr) | prot));
2513e0d3e40SDavid Hildenbrand 				pages++;
2523e0d3e40SDavid Hildenbrand 				continue;
2539ec8fa8dSDavid Hildenbrand 			} else if (!direct && MACHINE_HAS_EDAT1) {
2549ec8fa8dSDavid Hildenbrand 				void *new_page;
2559ec8fa8dSDavid Hildenbrand 
2569ec8fa8dSDavid Hildenbrand 				/*
2579ec8fa8dSDavid Hildenbrand 				 * Use 1MB frames for vmemmap if available. We
2589ec8fa8dSDavid Hildenbrand 				 * always use large frames even if they are only
2599ec8fa8dSDavid Hildenbrand 				 * partially used. Otherwise we would also have
2609ec8fa8dSDavid Hildenbrand 				 * to allocate page tables, since vmemmap_populate
2619ec8fa8dSDavid Hildenbrand 				 * gets called for each section separately.
2629ec8fa8dSDavid Hildenbrand 				 */
2639a996c67SHeiko Carstens 				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
264f2057b42SDavid Hildenbrand 				if (new_page) {
265b8e3b379SHeiko Carstens 					set_pmd(pmd, __pmd(__pa(new_page) | prot));
266cd5781d6SDavid Hildenbrand 					if (!IS_ALIGNED(addr, PMD_SIZE) ||
267cd5781d6SDavid Hildenbrand 					    !IS_ALIGNED(next, PMD_SIZE)) {
2689a996c67SHeiko Carstens 						vmemmap_use_new_sub_pmd(addr, next);
269cd5781d6SDavid Hildenbrand 					}
2709ec8fa8dSDavid Hildenbrand 					continue;
2713e0d3e40SDavid Hildenbrand 				}
272f2057b42SDavid Hildenbrand 			}
2733e0d3e40SDavid Hildenbrand 			pte = vmem_pte_alloc();
2743e0d3e40SDavid Hildenbrand 			if (!pte)
2753e0d3e40SDavid Hildenbrand 				goto out;
2763e0d3e40SDavid Hildenbrand 			pmd_populate(&init_mm, pmd, pte);
277cd5781d6SDavid Hildenbrand 		} else if (pmd_large(*pmd)) {
278cd5781d6SDavid Hildenbrand 			if (!direct)
279cd5781d6SDavid Hildenbrand 				vmemmap_use_sub_pmd(addr, next);
2803e0d3e40SDavid Hildenbrand 			continue;
281cd5781d6SDavid Hildenbrand 		}
2829ec8fa8dSDavid Hildenbrand 		ret = modify_pte_table(pmd, addr, next, add, direct);
2839ec8fa8dSDavid Hildenbrand 		if (ret)
2849ec8fa8dSDavid Hildenbrand 			goto out;
285b9ff8100SDavid Hildenbrand 		if (!add)
286b9ff8100SDavid Hildenbrand 			try_free_pte_table(pmd, addr & PMD_MASK);
2873e0d3e40SDavid Hildenbrand 	}
2883e0d3e40SDavid Hildenbrand 	ret = 0;
2893e0d3e40SDavid Hildenbrand out:
2909ec8fa8dSDavid Hildenbrand 	if (direct)
2913e0d3e40SDavid Hildenbrand 		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
2923e0d3e40SDavid Hildenbrand 	return ret;
2933e0d3e40SDavid Hildenbrand }
2943e0d3e40SDavid Hildenbrand 
295b9ff8100SDavid Hildenbrand static void try_free_pmd_table(pud_t *pud, unsigned long start)
296b9ff8100SDavid Hildenbrand {
297b9ff8100SDavid Hildenbrand 	pmd_t *pmd;
298b9ff8100SDavid Hildenbrand 	int i;
299b9ff8100SDavid Hildenbrand 
300b9ff8100SDavid Hildenbrand 	pmd = pmd_offset(pud, start);
301b9ff8100SDavid Hildenbrand 	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
302b9ff8100SDavid Hildenbrand 		if (!pmd_none(*pmd))
303b9ff8100SDavid Hildenbrand 			return;
304b9ff8100SDavid Hildenbrand 	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
305b9ff8100SDavid Hildenbrand 	pud_clear(pud);
306b9ff8100SDavid Hildenbrand }
307b9ff8100SDavid Hildenbrand 
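/*
 * Add or remove region-third (PUD) level mappings for [addr, end). Fully
 * PUD-aligned chunks of the identity map use 2GB mappings when EDAT2 is
 * available; otherwise the range is handled by modify_pmd_table().
 */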
3083e0d3e40SDavid Hildenbrand static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
3099ec8fa8dSDavid Hildenbrand 			    bool add, bool direct)
3103e0d3e40SDavid Hildenbrand {
3113e0d3e40SDavid Hildenbrand 	unsigned long next, prot, pages = 0;
3123e0d3e40SDavid Hildenbrand 	int ret = -ENOMEM;
3133e0d3e40SDavid Hildenbrand 	pud_t *pud;
3143e0d3e40SDavid Hildenbrand 	pmd_t *pmd;
3153e0d3e40SDavid Hildenbrand 
3163e0d3e40SDavid Hildenbrand 	prot = pgprot_val(REGION3_KERNEL);
3173e0d3e40SDavid Hildenbrand 	if (!MACHINE_HAS_NX)
3183e0d3e40SDavid Hildenbrand 		prot &= ~_REGION_ENTRY_NOEXEC;
3193e0d3e40SDavid Hildenbrand 	pud = pud_offset(p4d, addr);
3203e0d3e40SDavid Hildenbrand 	for (; addr < end; addr = next, pud++) {
3213e0d3e40SDavid Hildenbrand 		next = pud_addr_end(addr, end);
3223e0d3e40SDavid Hildenbrand 		if (!add) {
3233e0d3e40SDavid Hildenbrand 			if (pud_none(*pud))
3243e0d3e40SDavid Hildenbrand 				continue;
325*907835e6SPeter Xu 			if (pud_leaf(*pud)) {
3263e0d3e40SDavid Hildenbrand 				if (IS_ALIGNED(addr, PUD_SIZE) &&
3273e0d3e40SDavid Hildenbrand 				    IS_ALIGNED(next, PUD_SIZE)) {
3283e0d3e40SDavid Hildenbrand 					pud_clear(pud);
3293e0d3e40SDavid Hildenbrand 					pages++;
3303e0d3e40SDavid Hildenbrand 				}
3313e0d3e40SDavid Hildenbrand 				continue;
3323e0d3e40SDavid Hildenbrand 			}
3333e0d3e40SDavid Hildenbrand 		} else if (pud_none(*pud)) {
3343e0d3e40SDavid Hildenbrand 			if (IS_ALIGNED(addr, PUD_SIZE) &&
3353e0d3e40SDavid Hildenbrand 			    IS_ALIGNED(next, PUD_SIZE) &&
336b193d2d4SHeiko Carstens 			    MACHINE_HAS_EDAT2 && direct &&
3373e0d3e40SDavid Hildenbrand 			    !debug_pagealloc_enabled()) {
338b8e3b379SHeiko Carstens 				set_pud(pud, __pud(__pa(addr) | prot));
3393e0d3e40SDavid Hildenbrand 				pages++;
3403e0d3e40SDavid Hildenbrand 				continue;
3413e0d3e40SDavid Hildenbrand 			}
3423e0d3e40SDavid Hildenbrand 			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
3433e0d3e40SDavid Hildenbrand 			if (!pmd)
3443e0d3e40SDavid Hildenbrand 				goto out;
3453e0d3e40SDavid Hildenbrand 			pud_populate(&init_mm, pud, pmd);
346*907835e6SPeter Xu 		} else if (pud_leaf(*pud)) {
3473e0d3e40SDavid Hildenbrand 			continue;
3489a996c67SHeiko Carstens 		}
3499ec8fa8dSDavid Hildenbrand 		ret = modify_pmd_table(pud, addr, next, add, direct);
3503e0d3e40SDavid Hildenbrand 		if (ret)
3513e0d3e40SDavid Hildenbrand 			goto out;
352b9ff8100SDavid Hildenbrand 		if (!add)
353b9ff8100SDavid Hildenbrand 			try_free_pmd_table(pud, addr & PUD_MASK);
3543e0d3e40SDavid Hildenbrand 	}
3553e0d3e40SDavid Hildenbrand 	ret = 0;
3563e0d3e40SDavid Hildenbrand out:
3579ec8fa8dSDavid Hildenbrand 	if (direct)
3583e0d3e40SDavid Hildenbrand 		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
3593e0d3e40SDavid Hildenbrand 	return ret;
3603e0d3e40SDavid Hildenbrand }
3613e0d3e40SDavid Hildenbrand 
362b9ff8100SDavid Hildenbrand static void try_free_pud_table(p4d_t *p4d, unsigned long start)
363b9ff8100SDavid Hildenbrand {
364b9ff8100SDavid Hildenbrand 	pud_t *pud;
365b9ff8100SDavid Hildenbrand 	int i;
366b9ff8100SDavid Hildenbrand 
367b9ff8100SDavid Hildenbrand 	pud = pud_offset(p4d, start);
3689a996c67SHeiko Carstens 	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
369b9ff8100SDavid Hildenbrand 		if (!pud_none(*pud))
370b9ff8100SDavid Hildenbrand 			return;
3719a996c67SHeiko Carstens 	}
372b9ff8100SDavid Hildenbrand 	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
373b9ff8100SDavid Hildenbrand 	p4d_clear(p4d);
374b9ff8100SDavid Hildenbrand }
375b9ff8100SDavid Hildenbrand 
3763e0d3e40SDavid Hildenbrand static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
3779ec8fa8dSDavid Hildenbrand 			    bool add, bool direct)
3783e0d3e40SDavid Hildenbrand {
3793e0d3e40SDavid Hildenbrand 	unsigned long next;
3803e0d3e40SDavid Hildenbrand 	int ret = -ENOMEM;
3813e0d3e40SDavid Hildenbrand 	p4d_t *p4d;
3823e0d3e40SDavid Hildenbrand 	pud_t *pud;
3833e0d3e40SDavid Hildenbrand 
3843e0d3e40SDavid Hildenbrand 	p4d = p4d_offset(pgd, addr);
3853e0d3e40SDavid Hildenbrand 	for (; addr < end; addr = next, p4d++) {
3863e0d3e40SDavid Hildenbrand 		next = p4d_addr_end(addr, end);
3873e0d3e40SDavid Hildenbrand 		if (!add) {
3883e0d3e40SDavid Hildenbrand 			if (p4d_none(*p4d))
3893e0d3e40SDavid Hildenbrand 				continue;
3903e0d3e40SDavid Hildenbrand 		} else if (p4d_none(*p4d)) {
3913e0d3e40SDavid Hildenbrand 			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
3923e0d3e40SDavid Hildenbrand 			if (!pud)
3933e0d3e40SDavid Hildenbrand 				goto out;
394bffc2f7aSVasily Gorbik 			p4d_populate(&init_mm, p4d, pud);
3953e0d3e40SDavid Hildenbrand 		}
3969ec8fa8dSDavid Hildenbrand 		ret = modify_pud_table(p4d, addr, next, add, direct);
3973e0d3e40SDavid Hildenbrand 		if (ret)
3983e0d3e40SDavid Hildenbrand 			goto out;
399b9ff8100SDavid Hildenbrand 		if (!add)
400b9ff8100SDavid Hildenbrand 			try_free_pud_table(p4d, addr & P4D_MASK);
4013e0d3e40SDavid Hildenbrand 	}
4023e0d3e40SDavid Hildenbrand 	ret = 0;
4033e0d3e40SDavid Hildenbrand out:
4043e0d3e40SDavid Hildenbrand 	return ret;
4053e0d3e40SDavid Hildenbrand }
4063e0d3e40SDavid Hildenbrand 
407b9ff8100SDavid Hildenbrand static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
408b9ff8100SDavid Hildenbrand {
409b9ff8100SDavid Hildenbrand 	p4d_t *p4d;
410b9ff8100SDavid Hildenbrand 	int i;
411b9ff8100SDavid Hildenbrand 
412b9ff8100SDavid Hildenbrand 	p4d = p4d_offset(pgd, start);
4139a996c67SHeiko Carstens 	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
414b9ff8100SDavid Hildenbrand 		if (!p4d_none(*p4d))
415b9ff8100SDavid Hildenbrand 			return;
4169a996c67SHeiko Carstens 	}
417b9ff8100SDavid Hildenbrand 	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
418b9ff8100SDavid Hildenbrand 	pgd_clear(pgd);
419b9ff8100SDavid Hildenbrand }
420b9ff8100SDavid Hildenbrand 
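/*
 * Central worker for building and tearing down kernel page tables: @add
 * selects populate vs. unmap, @direct distinguishes the identity (1:1)
 * mapping from the vmemmap. On removal, empty page tables are freed
 * bottom-up via the try_free_*_table() helpers and the TLB is flushed.
 */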
4219ec8fa8dSDavid Hildenbrand static int modify_pagetable(unsigned long start, unsigned long end, bool add,
4229ec8fa8dSDavid Hildenbrand 			    bool direct)
4233e0d3e40SDavid Hildenbrand {
4243e0d3e40SDavid Hildenbrand 	unsigned long addr, next;
4253e0d3e40SDavid Hildenbrand 	int ret = -ENOMEM;
4263e0d3e40SDavid Hildenbrand 	pgd_t *pgd;
4273e0d3e40SDavid Hildenbrand 	p4d_t *p4d;
4283e0d3e40SDavid Hildenbrand 
4293e0d3e40SDavid Hildenbrand 	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
4303e0d3e40SDavid Hildenbrand 		return -EINVAL;
43106fc3b0dSAlexander Gordeev 	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
43206fc3b0dSAlexander Gordeev 	if (WARN_ON_ONCE(end > VMALLOC_START))
43306fc3b0dSAlexander Gordeev 		return -EINVAL;
4343e0d3e40SDavid Hildenbrand 	for (addr = start; addr < end; addr = next) {
4353e0d3e40SDavid Hildenbrand 		next = pgd_addr_end(addr, end);
4363e0d3e40SDavid Hildenbrand 		pgd = pgd_offset_k(addr);
4373e0d3e40SDavid Hildenbrand 
4383e0d3e40SDavid Hildenbrand 		if (!add) {
4393e0d3e40SDavid Hildenbrand 			if (pgd_none(*pgd))
4403e0d3e40SDavid Hildenbrand 				continue;
4413e0d3e40SDavid Hildenbrand 		} else if (pgd_none(*pgd)) {
4423e0d3e40SDavid Hildenbrand 			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
4433e0d3e40SDavid Hildenbrand 			if (!p4d)
4443e0d3e40SDavid Hildenbrand 				goto out;
4453e0d3e40SDavid Hildenbrand 			pgd_populate(&init_mm, pgd, p4d);
4463e0d3e40SDavid Hildenbrand 		}
4479ec8fa8dSDavid Hildenbrand 		ret = modify_p4d_table(pgd, addr, next, add, direct);
4483e0d3e40SDavid Hildenbrand 		if (ret)
4493e0d3e40SDavid Hildenbrand 			goto out;
450b9ff8100SDavid Hildenbrand 		if (!add)
451b9ff8100SDavid Hildenbrand 			try_free_p4d_table(pgd, addr & PGDIR_MASK);
4523e0d3e40SDavid Hildenbrand 	}
4533e0d3e40SDavid Hildenbrand 	ret = 0;
4543e0d3e40SDavid Hildenbrand out:
4553e0d3e40SDavid Hildenbrand 	if (!add)
4563e0d3e40SDavid Hildenbrand 		flush_tlb_kernel_range(start, end);
4573e0d3e40SDavid Hildenbrand 	return ret;
4583e0d3e40SDavid Hildenbrand }
4593e0d3e40SDavid Hildenbrand 
4609ec8fa8dSDavid Hildenbrand static int add_pagetable(unsigned long start, unsigned long end, bool direct)
4613e0d3e40SDavid Hildenbrand {
4629ec8fa8dSDavid Hildenbrand 	return modify_pagetable(start, end, true, direct);
4633e0d3e40SDavid Hildenbrand }
4643e0d3e40SDavid Hildenbrand 
4659ec8fa8dSDavid Hildenbrand static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
4663e0d3e40SDavid Hildenbrand {
4679ec8fa8dSDavid Hildenbrand 	return modify_pagetable(start, end, false, direct);
4683e0d3e40SDavid Hildenbrand }
4693e0d3e40SDavid Hildenbrand 
470f4eb07c1SHeiko Carstens /*
471f4eb07c1SHeiko Carstens  * Add a physical memory range to the 1:1 mapping.
472f4eb07c1SHeiko Carstens  */
4738398b226SDavid Hildenbrand static int vmem_add_range(unsigned long start, unsigned long size)
474f4eb07c1SHeiko Carstens {
475688fcbbbSAlexander Gordeev 	start = (unsigned long)__va(start);
4769ec8fa8dSDavid Hildenbrand 	return add_pagetable(start, start + size, true);
477f4eb07c1SHeiko Carstens }
478f4eb07c1SHeiko Carstens 
479f4eb07c1SHeiko Carstens /*
480f4eb07c1SHeiko Carstens  * Remove a physical memory range from the 1:1 mapping.
481f4eb07c1SHeiko Carstens  */
482f4eb07c1SHeiko Carstens static void vmem_remove_range(unsigned long start, unsigned long size)
483f4eb07c1SHeiko Carstens {
484688fcbbbSAlexander Gordeev 	start = (unsigned long)__va(start);
4859ec8fa8dSDavid Hildenbrand 	remove_pagetable(start, start + size, true);
486f4eb07c1SHeiko Carstens }
487f4eb07c1SHeiko Carstens 
488f4eb07c1SHeiko Carstens /*
489f4eb07c1SHeiko Carstens  * Add a backed mem_map array to the virtual mem_map array.
490f4eb07c1SHeiko Carstens  */
4917b73d978SChristoph Hellwig int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
4927b73d978SChristoph Hellwig 			       struct vmem_altmap *altmap)
493f4eb07c1SHeiko Carstens {
494c00f05a9SDavid Hildenbrand 	int ret;
495c00f05a9SDavid Hildenbrand 
496aa18e0e6SDavid Hildenbrand 	mutex_lock(&vmem_mutex);
4979ec8fa8dSDavid Hildenbrand 	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
498c00f05a9SDavid Hildenbrand 	ret = add_pagetable(start, end, false);
499c00f05a9SDavid Hildenbrand 	if (ret)
500c00f05a9SDavid Hildenbrand 		remove_pagetable(start, end, false);
501aa18e0e6SDavid Hildenbrand 	mutex_unlock(&vmem_mutex);
502c00f05a9SDavid Hildenbrand 	return ret;
503f4eb07c1SHeiko Carstens }
504f4eb07c1SHeiko Carstens 
50724b6d416SChristoph Hellwig void vmemmap_free(unsigned long start, unsigned long end,
50624b6d416SChristoph Hellwig 		  struct vmem_altmap *altmap)
5070197518cSTang Chen {
508aa18e0e6SDavid Hildenbrand 	mutex_lock(&vmem_mutex);
5099ec8fa8dSDavid Hildenbrand 	remove_pagetable(start, end, false);
510aa18e0e6SDavid Hildenbrand 	mutex_unlock(&vmem_mutex);
5110197518cSTang Chen }
5120197518cSTang Chen 
513f05f62d0SDavid Hildenbrand void vmem_remove_mapping(unsigned long start, unsigned long size)
514f4eb07c1SHeiko Carstens {
515f4eb07c1SHeiko Carstens 	mutex_lock(&vmem_mutex);
516f05f62d0SDavid Hildenbrand 	vmem_remove_range(start, size);
517f4eb07c1SHeiko Carstens 	mutex_unlock(&vmem_mutex);
518f4eb07c1SHeiko Carstens }
519f4eb07c1SHeiko Carstens 
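/* Memory hotplug is limited to the range [0, max_mappable). */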
5207707248aSAnshuman Khandual struct range arch_get_mappable_range(void)
5217707248aSAnshuman Khandual {
5227707248aSAnshuman Khandual 	struct range mhp_range;
5237707248aSAnshuman Khandual 
5247707248aSAnshuman Khandual 	mhp_range.start = 0;
52594fd5220SAlexander Gordeev 	mhp_range.end = max_mappable - 1;
5267707248aSAnshuman Khandual 	return mhp_range;
5277707248aSAnshuman Khandual }
5287707248aSAnshuman Khandual 
52917f34580SHeiko Carstens int vmem_add_mapping(unsigned long start, unsigned long size)
530f4eb07c1SHeiko Carstens {
5317707248aSAnshuman Khandual 	struct range range = arch_get_mappable_range();
532f4eb07c1SHeiko Carstens 	int ret;
533f4eb07c1SHeiko Carstens 
5347707248aSAnshuman Khandual 	if (start < range.start ||
5357707248aSAnshuman Khandual 	    start + size > range.end + 1 ||
536f05f62d0SDavid Hildenbrand 	    start + size < start)
537f05f62d0SDavid Hildenbrand 		return -ERANGE;
538f05f62d0SDavid Hildenbrand 
539f4eb07c1SHeiko Carstens 	mutex_lock(&vmem_mutex);
5408398b226SDavid Hildenbrand 	ret = vmem_add_range(start, size);
541f4eb07c1SHeiko Carstens 	if (ret)
542f05f62d0SDavid Hildenbrand 		vmem_remove_range(start, size);
543f4eb07c1SHeiko Carstens 	mutex_unlock(&vmem_mutex);
544f4eb07c1SHeiko Carstens 	return ret;
545f4eb07c1SHeiko Carstens }
546f4eb07c1SHeiko Carstens 
547f4eb07c1SHeiko Carstens /*
5484df29d2bSAlexander Gordeev  * Allocate a new or return an existing page-table entry, but do not map it
5494df29d2bSAlexander Gordeev  * to any physical address. If missing, allocate the segment- and region-
5504df29d2bSAlexander Gordeev  * table entries along the way. Encountering a large segment- or region-table
5514df29d2bSAlexander Gordeev  * entry while traversing is an error, since the function is expected to be
552cada938aSHeiko Carstens  * called against virtual regions reserved for 4KB mappings only.
5534df29d2bSAlexander Gordeev  */
5542f0e8aaeSAlexander Gordeev pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
5554df29d2bSAlexander Gordeev {
5564df29d2bSAlexander Gordeev 	pte_t *ptep = NULL;
5574df29d2bSAlexander Gordeev 	pgd_t *pgd;
5584df29d2bSAlexander Gordeev 	p4d_t *p4d;
5594df29d2bSAlexander Gordeev 	pud_t *pud;
5604df29d2bSAlexander Gordeev 	pmd_t *pmd;
5614df29d2bSAlexander Gordeev 	pte_t *pte;
5624df29d2bSAlexander Gordeev 
5634df29d2bSAlexander Gordeev 	pgd = pgd_offset_k(addr);
5644df29d2bSAlexander Gordeev 	if (pgd_none(*pgd)) {
5654df29d2bSAlexander Gordeev 		if (!alloc)
5664df29d2bSAlexander Gordeev 			goto out;
5674df29d2bSAlexander Gordeev 		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
5684df29d2bSAlexander Gordeev 		if (!p4d)
5694df29d2bSAlexander Gordeev 			goto out;
5704df29d2bSAlexander Gordeev 		pgd_populate(&init_mm, pgd, p4d);
5714df29d2bSAlexander Gordeev 	}
5724df29d2bSAlexander Gordeev 	p4d = p4d_offset(pgd, addr);
5734df29d2bSAlexander Gordeev 	if (p4d_none(*p4d)) {
5744df29d2bSAlexander Gordeev 		if (!alloc)
5754df29d2bSAlexander Gordeev 			goto out;
5764df29d2bSAlexander Gordeev 		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
5774df29d2bSAlexander Gordeev 		if (!pud)
5784df29d2bSAlexander Gordeev 			goto out;
5794df29d2bSAlexander Gordeev 		p4d_populate(&init_mm, p4d, pud);
5804df29d2bSAlexander Gordeev 	}
5814df29d2bSAlexander Gordeev 	pud = pud_offset(p4d, addr);
5824df29d2bSAlexander Gordeev 	if (pud_none(*pud)) {
5834df29d2bSAlexander Gordeev 		if (!alloc)
5844df29d2bSAlexander Gordeev 			goto out;
5854df29d2bSAlexander Gordeev 		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
5864df29d2bSAlexander Gordeev 		if (!pmd)
5874df29d2bSAlexander Gordeev 			goto out;
5884df29d2bSAlexander Gordeev 		pud_populate(&init_mm, pud, pmd);
589*907835e6SPeter Xu 	} else if (WARN_ON_ONCE(pud_leaf(*pud))) {
5904df29d2bSAlexander Gordeev 		goto out;
5914df29d2bSAlexander Gordeev 	}
5924df29d2bSAlexander Gordeev 	pmd = pmd_offset(pud, addr);
5934df29d2bSAlexander Gordeev 	if (pmd_none(*pmd)) {
5944df29d2bSAlexander Gordeev 		if (!alloc)
5954df29d2bSAlexander Gordeev 			goto out;
5964df29d2bSAlexander Gordeev 		pte = vmem_pte_alloc();
5974df29d2bSAlexander Gordeev 		if (!pte)
5984df29d2bSAlexander Gordeev 			goto out;
5994df29d2bSAlexander Gordeev 		pmd_populate(&init_mm, pmd, pte);
6004df29d2bSAlexander Gordeev 	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
6014df29d2bSAlexander Gordeev 		goto out;
6024df29d2bSAlexander Gordeev 	}
6034df29d2bSAlexander Gordeev 	ptep = pte_offset_kernel(pmd, addr);
6044df29d2bSAlexander Gordeev out:
6054df29d2bSAlexander Gordeev 	return ptep;
6064df29d2bSAlexander Gordeev }
6074df29d2bSAlexander Gordeev 
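/*
 * Map the 4K page at @addr to physical address @phys with protection @prot.
 * The PTE is invalidated with IPTE before the new mapping is installed;
 * with @alloc set, missing page tables are allocated along the way.
 */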
6084df29d2bSAlexander Gordeev int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
6094df29d2bSAlexander Gordeev {
6104df29d2bSAlexander Gordeev 	pte_t *ptep, pte;
6114df29d2bSAlexander Gordeev 
6124df29d2bSAlexander Gordeev 	if (!IS_ALIGNED(addr, PAGE_SIZE))
6134df29d2bSAlexander Gordeev 		return -EINVAL;
6144df29d2bSAlexander Gordeev 	ptep = vmem_get_alloc_pte(addr, alloc);
6154df29d2bSAlexander Gordeev 	if (!ptep)
6164df29d2bSAlexander Gordeev 		return -ENOMEM;
6174df29d2bSAlexander Gordeev 	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
6184df29d2bSAlexander Gordeev 	pte = mk_pte_phys(phys, prot);
6194df29d2bSAlexander Gordeev 	set_pte(ptep, pte);
6204df29d2bSAlexander Gordeev 	return 0;
6214df29d2bSAlexander Gordeev }
6224df29d2bSAlexander Gordeev 
6234df29d2bSAlexander Gordeev int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
6244df29d2bSAlexander Gordeev {
6254df29d2bSAlexander Gordeev 	int rc;
6264df29d2bSAlexander Gordeev 
6274df29d2bSAlexander Gordeev 	mutex_lock(&vmem_mutex);
6284df29d2bSAlexander Gordeev 	rc = __vmem_map_4k_page(addr, phys, prot, true);
6294df29d2bSAlexander Gordeev 	mutex_unlock(&vmem_mutex);
6304df29d2bSAlexander Gordeev 	return rc;
6314df29d2bSAlexander Gordeev }
6324df29d2bSAlexander Gordeev 
6334df29d2bSAlexander Gordeev void vmem_unmap_4k_page(unsigned long addr)
6344df29d2bSAlexander Gordeev {
6354df29d2bSAlexander Gordeev 	pte_t *ptep;
6364df29d2bSAlexander Gordeev 
6374df29d2bSAlexander Gordeev 	mutex_lock(&vmem_mutex);
6384df29d2bSAlexander Gordeev 	ptep = virt_to_kpte(addr);
6394df29d2bSAlexander Gordeev 	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
6404df29d2bSAlexander Gordeev 	pte_clear(&init_mm, addr, ptep);
6414df29d2bSAlexander Gordeev 	mutex_unlock(&vmem_mutex);
6424df29d2bSAlexander Gordeev }
6434df29d2bSAlexander Gordeev 
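/*
 * Apply the final protections to the kernel mappings: kernel, init and
 * amode31 text become read-only and executable, read-only data is write
 * protected, and the identity mapping is split into 4K pages when
 * debug_pagealloc is enabled. Instruction-execution protection is enabled
 * last if the machine provides it.
 */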
644f4eb07c1SHeiko Carstens void __init vmem_map_init(void)
645f4eb07c1SHeiko Carstens {
646a7eb2880SHeiko Carstens 	__set_memory_rox(_stext, _etext);
647a7eb2880SHeiko Carstens 	__set_memory_ro(_etext, __end_rodata);
648a7eb2880SHeiko Carstens 	__set_memory_rox(_sinittext, _einittext);
649a7eb2880SHeiko Carstens 	__set_memory_rox(__stext_amode31, __etext_amode31);
650a6e49f10SHeiko Carstens 	/*
651a6e49f10SHeiko Carstens 	 * If the BEAR-enhancement facility is not installed, the first
652a6e49f10SHeiko Carstens 	 * prefix page is used to return to the previous context with
653a6e49f10SHeiko Carstens 	 * an LPSWE instruction and therefore must be executable.
654a6e49f10SHeiko Carstens 	 */
655c0f1d478SHeiko Carstens 	if (!static_key_enabled(&cpu_has_bear))
656c0f1d478SHeiko Carstens 		set_memory_x(0, 1);
6577b03942fSAlexander Gordeev 	if (debug_pagealloc_enabled()) {
658a7eb2880SHeiko Carstens 		/*
659a7eb2880SHeiko Carstens 		 * Use RELOC_HIDE() as long as __va(0) translates to NULL,
660a7eb2880SHeiko Carstens 		 * since performing pointer arithmetic on a NULL pointer
661a7eb2880SHeiko Carstens 		 * has undefined behavior and generates compiler warnings.
662a7eb2880SHeiko Carstens 		 */
663a7eb2880SHeiko Carstens 		__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
6647b03942fSAlexander Gordeev 	}
665c0f1d478SHeiko Carstens 	if (MACHINE_HAS_NX)
666c0f1d478SHeiko Carstens 		ctl_set_bit(0, 20);
66757d7f939SMartin Schwidefsky 	pr_info("Write protected kernel read-only data: %luk\n",
668ead7a22eSHeiko Carstens 		(unsigned long)(__end_rodata - _stext) >> 10);
669f4eb07c1SHeiko Carstens }
670