// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>

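/*
 * empty_zero_page backs the generic ZERO_PAGE() macro: reads of
 * anonymous pages that have never been written are serviced by
 * read-only mappings of this one page.
 */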
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and read-only afterwards; used as the
 * reference pgd from which new page tables are initialized.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and read-only after that */
unsigned long long highmem;
EXPORT_SYMBOL(highmem);
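
/*
 * Flipped to 1 at the end of mem_init(), once kmalloc() is usable;
 * early allocation paths (e.g. the malloc wrappers on the os-Linux
 * side) are expected to check this before calling into the kernel
 * allocator.
 */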
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free(__pa(brk_end), uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	max_low_pfn = totalram_pages();
	max_pfn = max_low_pfn;
	mem_init_print_info(NULL);
	kmalloc_ok = 1;
}
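
/*
 * Rough sketch of the pseudo-physical layout at this point (assuming
 * the usual UML setup; illustrative, not authoritative):
 *
 *	uml_physmem .. brk_end           kernel image and early brk heap
 *	brk_end .. old uml_reserved      mapped above and freed to memblock
 *	uml_reserved .. high_physmem     handed to the page allocator
 */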

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		if (pte != pte_offset_kernel(pmd, 0))
			BUG();
	}
}
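
/*
 * Once the pmd is populated, a software walk down to a pte in the
 * covered range looks roughly like this (illustrative sketch only):
 *
 *	p4d_t *p4d = p4d_offset(pgd_offset_k(addr), addr);
 *	pmd_t *pmd = pmd_offset(pud_offset(p4d, addr), addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */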
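/*
 * With two-level page tables the pud and pmd levels are folded into
 * the pgd, so there is no separate pmd page to allocate and this is
 * a no-op.
 */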
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);

	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#endif
}
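/*
 * Build only the page-table skeleton (pmd and pte pages) covering
 * [start, end); the pte entries themselves are filled in later, by
 * set_fixmap() or by fixaddr_user_init() below.
 */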
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}
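/*
 * Take a private, read-only snapshot of the host's vsyscall area:
 * copy its contents into freshly allocated pages and map those at
 * the same virtual addresses, so processes see a stable copy.
 */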
static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
		pte = virt_to_kpte(vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}

void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	unsigned long vaddr;

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
	free_area_init(max_zone_pfn);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}
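
/*
 * Minimal sketch of how a fixmap slot prepared above would be used
 * later (illustrative only; FIX_EXAMPLE stands in for a real fixmap
 * index):
 *
 *	set_fixmap(FIX_EXAMPLE, page_to_phys(page));
 *	void *ptr = (void *) fix_to_virt(FIX_EXAMPLE);
 */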

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
}

/* Allocate and free page tables. */

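/*
 * The user half of a new pgd starts out empty, while the kernel half
 * is copied from swapper_pg_dir, so every address space shares the
 * kernel mappings established at boot.
 */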
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}
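/*
 * Trivial kmalloc() wrapper, apparently kept so that the os-Linux
 * side of UML (built against libc, without kernel headers) can
 * allocate kernel memory.
 */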
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}