// /openbmc/linux/arch/um/kernel/mem.c (revision 6726d552)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
#include <linux/sched/task.h>

#ifdef CONFIG_KASAN
int kasan_um_is_ready;
void kasan_init(void)
{
	/*
	 * kasan_map_memory will map all of the required address space and
	 * the host machine will allocate physical memory as necessary.
	 */
	kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE);
	init_task.kasan_depth = 0;
	kasan_um_is_ready = true;
}

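/*
 * Stashing a pointer to kasan_init() in the dedicated ".kasan_init"
 * section appears to let UML's linker scripts run it as an early
 * host-side constructor, so the shadow is mapped before generic kernel
 * init executes.
 */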
static void (*kasan_init_ptr)(void)
__section(".kasan_init") __used
= kasan_init;
#endif

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and read-only after that; new page tables
 * copy their kernel mappings from it (see pgd_alloc() below).
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
EXPORT_SYMBOL(highmem);
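/*
 * Zero until mem_init() has brought the page allocator up; early code
 * elsewhere in arch/um appears to check this before trusting kmalloc().
 */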
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free((void *)brk_end, uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	max_low_pfn = totalram_pages();
	max_pfn = max_low_pfn;
	kmalloc_ok = 1;
}
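/*
 * A sketch of the layout mem_init() finalizes, assuming the usual UML
 * arrangement where the image and its brk sit at the bottom of the
 * "physical" memory area:
 *
 *	uml_physmem .. image .. brk_end .. uml_reserved .. high_physmem
 *
 * The brk_end..uml_reserved gap was held back while memblock owned it;
 * above, it is mapped in and handed over to the page allocator.
 */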

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}
}

static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	BUG_ON(pmd_table != pmd_offset(pud, 0));
#endif
}
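/*
 * With two-level page tables (CONFIG_3_LEVEL_PGTABLES unset), the pud
 * folds into the pgd and one_md_table_init() compiles to a no-op: the
 * pmd level is then the top level and needs no separate allocation.
 */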

static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}
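/*
 * fixrange_init() only builds the page-table skeleton; no ptes are
 * written. Typical use, as in paging_init() below:
 *
 *	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);
 *
 * after which set_fixmap() merely has to fill in pte entries.
 */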

static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
		pte = virt_to_kpte(vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}
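/*
 * Net effect of the above: the host's vsyscall window stays visible at
 * the same user addresses, but backed by a private read-only snapshot
 * of its contents, presumably to insulate the guest from the host page
 * changing underneath it.
 */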

void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	unsigned long vaddr;

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
	free_area_init(max_zone_pfn);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}
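/*
 * Note that only ZONE_NORMAL is populated: UML has no DMA limitations,
 * and everything up to end_iomem (physical memory plus any iomem
 * regions) is treated as directly mappable low memory.
 */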

/*
 * This can't do anything: the kernel image lives in the host process's
 * own address space, not in the "physical" memory UML manages, so there
 * is no init text or data to hand back to the page allocator.
 */
void free_initmem(void)
{
}

/* Allocate page tables. */

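/*
 * Each new pgd gets a cleared user half and a kernel half copied from
 * swapper_pg_dir, so every address space shares the kernel mappings set
 * up in paging_init().
 */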
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

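/*
 * Thin wrapper so UML's host-side (os-Linux) code, which can't include
 * kernel headers, can still reach kmalloc(); plain int parameters seem
 * to be used to keep kernel types out of the prototype.
 */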
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}

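/*
 * Translation from vma->vm_flags permission bits to page protections.
 * Private (non-VM_SHARED) writable entries map to PAGE_COPY so stores
 * fault and get copy-on-write treatment; DECLARE_VM_GET_PAGE_PROT below
 * emits the vm_get_page_prot() helper that indexes this table.
 */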
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_READONLY,
	[VM_EXEC | VM_READ]				= PAGE_READONLY,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT