// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <linux/sizes.h>

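/*
 * The kernel's master page table. paging_init() below clears it and
 * installs it as the initial MMU.TTB value.
 */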
pgd_t swapper_pg_dir[PTRS_PER_PGD];

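/*
 * Default memory setup: register the statically configured memory
 * window with memblock. Platforms with other layouts hook in through
 * the machine vector's mv_mem_init().
 */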
void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
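/*
 * Walk (and, below the PGD, allocate as needed) the kernel page tables
 * down to the PTE that maps @addr, returning NULL on failure.
 */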
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	p4d = p4d_alloc(NULL, pgd, addr);
	if (unlikely(!p4d)) {
		/* Don't dereference the pointer we just failed to allocate. */
		pr_err("%s: p4d allocation failed\n", __func__);
		return NULL;
	}

	pud = pud_alloc(NULL, p4d, addr);
	if (unlikely(!pud)) {
		pr_err("%s: pud allocation failed\n", __func__);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pr_err("%s: pmd allocation failed\n", __func__);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

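/*
 * Install a kernel mapping of @phys at @addr with protection @prot and
 * flush the stale TLB entry. Mappings marked _PAGE_WIRED are also
 * pinned into the TLB so they survive later flushes.
 */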
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

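/*
 * Undo set_pte_phys(): unwire the TLB entry if the protection bits
 * indicate it was wired, then clear the PTE and flush.
 */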
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

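/*
 * Fixmap plumbing: each fixed_addresses slot has a compile-time virtual
 * address, so mapping one is simply a matter of pointing its PTE at
 * @phys. __clear_fixmap() is the inverse operation.
 */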
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

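/*
 * Allocate and hook up a PMD page below @pud if one is not already
 * present, panicking if the early allocator cannot satisfy it.
 */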
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

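/*
 * Likewise for the PTE level: allocate and hook up a PTE page below
 * @pmd if one is not already present.
 */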
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

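/*
 * Hook for sanity-checking kmap PTEs. There is nothing to verify on sh,
 * so this is a no-op kept so that page_table_range_init() below matches
 * the structure of the i386 code this file is based on.
 */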
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

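/*
 * Pre-allocate page tables covering the [start, end) virtual range, so
 * that fixmap entries can later be installed without any on-demand
 * allocation.
 */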
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

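/*
 * Set up the pg_data_t for @nid: allocate it from memblock when
 * multiple nodes are configured, then record the PFN span the node
 * covers.
 */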
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

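/*
 * Feed the memblock memory regions into the active-range bookkeeping,
 * bring node 0 online, give the platform a final say through
 * plat_mem_setup(), and initialize the sparse memory model.
 */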
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
	}
	sparse_init();
}

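/*
 * Reserve the regions that must never reach the page allocator: the
 * kernel image itself, anything below CONFIG_ZERO_PAGE_OFFSET, the
 * initrd, and the crash kernel.
 */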
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (the first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

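/*
 * Top-level boot-time memory setup: run the platform memory hooks,
 * apply any memory limit, build the initial page tables for the fixmap
 * range, and hand the zone sizes over to the core VM.
 */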
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

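/*
 * Set once mem_init() has completed, for early code that needs to know
 * whether the page allocator is available yet.
 */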
unsigned int mem_init_done = 0;

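/*
 * Hand all free memory over to the page allocator, initialize the CPU
 * caches, the zero page and the vsyscall page, and print the virtual
 * memory layout.
 */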
void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
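/*
 * Memory hotplug entry points. Everything lands in ZONE_NORMAL, and
 * only PAGE_KERNEL protections are supported, so requests for anything
 * else are rejected up front.
 */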
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (unlikely(ret))
		pr_err("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

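/*
 * The inverse of arch_add_memory(): tear down the struct pages backing
 * the range being removed.
 */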
void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */