/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/sizes.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD];

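/*
 * Default machine vector memory setup: hand the platform's physical
 * memory range (__MEMORY_START, __MEMORY_SIZE) to memblock. Boards
 * needing anything fancier override the weak plat_mem_setup() hook
 * below.
 */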
void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
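/*
 * Walk the kernel page tables down to the PTE covering @addr,
 * allocating the intermediate (pud/pmd) levels as needed. Returns
 * NULL if the top-level entry is missing or an allocation fails.
 */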
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		/* pud is NULL here, so it must not be dereferenced. */
		pr_err("%s: pud allocation failed\n", __func__);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		/* Likewise, pmd is NULL on this error path. */
		pr_err("%s: pmd allocation failed\n", __func__);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

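/*
 * Map @phys at the kernel virtual address @addr with protection @prot
 * and flush the stale TLB entry. _PAGE_WIRED mappings are additionally
 * wired into the TLB so they are never evicted.
 */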
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

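/* Tear down a mapping previously installed by set_pte_phys(). */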
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

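/*
 * Fixmap helpers: convert a fixed-address index to its virtual
 * address and install or remove the mapping. An out-of-range index
 * is a hard bug.
 */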
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

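/*
 * Make sure a PMD table hangs off @pud, allocating one page from
 * memblock if the entry is empty. Panics if the early allocation
 * fails, as there is no way to make progress without it.
 */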
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

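/* Same again at the PTE level: populate @pmd with a page table. */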
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

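/*
 * Deliberately a no-op on sh; the hook apparently exists only for
 * structural parity with the x86 page_table_range_init(), where the
 * same-named helper has real work to do.
 */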
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

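/*
 * Pre-allocate page tables for the kernel virtual range [start, end)
 * so that later fixmap users can install PTEs without having to
 * allocate. Note that vaddr advances in PMD_SIZE steps; only the
 * tables are created here, not the final PTEs.
 */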
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

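/*
 * Set up the pglist_data for @nid. The node structure itself is only
 * allocated for CONFIG_NEED_MULTIPLE_NODES; otherwise the static
 * node 0 data is reused and just has its PFN span filled in.
 */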
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

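/*
 * Register every memblock memory region as an active PFN range, set
 * up node 0 (the only node in the non-NUMA case), let the platform
 * hook run, and initialize the sparse memory model.
 */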
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
	}
	sparse_init();
}

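/*
 * Reserve memory that is already in use before the first real
 * allocations happen: the kernel image, everything below
 * CONFIG_ZERO_PAGE_OFFSET, and any initrd or crash kernel regions.
 */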
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel image - everything from the zero page
	 * offset up to _end, rounded up to a full page. This catches
	 * the (definitely buggy) case of accidentally initializing
	 * memblock with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

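/*
 * Establish the kernel's view of memory: run the early reservations,
 * size lowmem, initialize the node and sparsemem data, clear and
 * install swapper_pg_dir, pre-populate the fixmap page tables, and
 * hand the zone sizes to the core VM.
 */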
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

unsigned int mem_init_done = 0;

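/*
 * Late initialization: derive high_memory from the online nodes,
 * release all free memblock memory to the page allocator, set up the
 * caches and the zero page, and dump the virtual memory layout.
 */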
void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
		(LAST_PKMAP * PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
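/*
 * Memory hotplug: all memory lives in ZONE_NORMAL here, so adding
 * memory reduces to creating struct page coverage via __add_pages().
 */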
int arch_add_memory(int nid, u64 start, u64 size,
			struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
	if (unlikely(ret))
		pr_warn("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#ifdef CONFIG_MEMORY_HOTREMOVE
void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	zone = page_zone(pfn_to_page(start_pfn));
	__remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */