// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/sizes.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
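/*
 * Walk init_mm's page tables for @addr, allocating any missing
 * intermediate levels below the pgd, and return a pointer to the PTE
 * slot. Returns NULL if the pgd entry is absent or an allocation fails.
 */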
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	/*
	 * On allocation failure the returned pointer is NULL, so the
	 * *_ERROR(*ptr) reporting used for pgd_none() above must not
	 * be applied here: it would dereference a NULL pointer. Fail
	 * quietly instead.
	 */
	p4d = p4d_alloc(NULL, pgd, addr);
	if (unlikely(!p4d))
		return NULL;

	pud = pud_alloc(NULL, p4d, addr);
	if (unlikely(!pud))
		return NULL;

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}

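/*
 * Install a kernel mapping of @phys at @addr with protection @prot and
 * flush the stale TLB entry for the current ASID. _PAGE_WIRED mappings
 * are additionally locked into the TLB.
 */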
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;

	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

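/*
 * Undo a mapping established by set_pte_phys(): unwire the TLB entry
 * if it was wired, clear the PTE, and flush the TLB entry for @addr.
 */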
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

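/*
 * Map (and, below, unmap) a compile-time fixmap slot. The slot index
 * is converted to its fixed virtual address via __fix_to_virt().
 */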
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

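/*
 * Boot-time allocator for intermediate page tables: if @pud has no pmd
 * table yet, grab a page from memblock and hook it up.
 */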
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

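/*
 * Likewise for the PTE level: populate @pmd with a memblock-allocated
 * page table if it is still empty.
 */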
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

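/*
 * Currently a no-op on sh; kept as a hook so page_table_range_init()
 * retains the structure of the equivalent x86-derived code, where this
 * step sanity-checks kmap PTE placement.
 */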
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

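/*
 * Pre-allocate page tables covering [start, end) so that entries in
 * that range (the fixmap, in practice) can later be set with
 * set_pte_phys() without any further allocation.
 */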
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

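/*
 * Set up NODE_DATA() for @nid and record the node's PFN span. In the
 * non-NUMA case the node 0 pgdat is provided statically by the core
 * mm, so only the span needs filling in here.
 */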
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

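/*
 * Register every memblock region as an active range, bring node 0
 * online, give the platform a final say via plat_mem_setup(), and
 * initialize the sparse memory model.
 */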
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	sparse_init();
}

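/*
 * Reserve everything that must not be handed to the page allocator:
 * the kernel image, the pages below CONFIG_ZERO_PAGE_OFFSET, the
 * initrd (if any), and the crash kernel region.
 */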
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (the first step was init_bootmem()) because it
	 * catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations.
	 */
	check_for_initrd();
	reserve_crashkernel();
}

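/*
 * Boot-time MM setup: run the machine vector's memory init hooks, take
 * the early reservations, size lowmem, set up the initial page tables
 * and the fixmap, and hand the zone layout to free_area_init().
 */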
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

unsigned int mem_init_done = 0;

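/*
 * Release boot memory to the page allocator, initialize the caches and
 * the zero page, set up the vsyscall page, and report the virtual
 * memory layout.
 */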
void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
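/*
 * Memory hotplug entry points. Everything hotplugged on sh lands in
 * ZONE_NORMAL, and only PAGE_KERNEL protections are supported.
 */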
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (unlikely(ret))
		pr_err("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
437