/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

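/*
 * swapper_pg_dir is the kernel's reference page table. paging_init()
 * clears it, points the MMU's TTB register at it, and pre-populates
 * the entries that back the fixmap range.
 */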
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
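/*
 * Walk the kernel page tables for addr, allocating any missing
 * intermediate levels, and return a pointer to the pte slot
 * (or NULL if a table could not be allocated).
 */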
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		/* Allocation failed; a NULL pud must not be passed to pud_ERROR(). */
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		/* Likewise, pmd is NULL here; dereferencing it would oops. */
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

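/*
 * Establish or tear down a single kernel mapping of phys at addr and
 * flush the local TLB entry. Mappings created with _PAGE_WIRED are
 * additionally pinned in (and later released from) a wired TLB slot.
 */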
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

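/*
 * Fixmap entries are compile-time-allocated virtual addresses below
 * FIXADDR_TOP; only the backing physical page is chosen at runtime.
 */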
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

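/*
 * Early page-table constructors: table pages come straight from
 * memblock, since the slab allocators are not up yet. Each helper
 * only populates an entry that is still empty.
 */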
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

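/*
 * No special kmap fixup is needed on sh, so this check is simply a
 * pass-through hook for page_table_range_init() below.
 */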
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

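/*
 * Pre-populate the pgd/pud/pmd levels covering [start, end) in
 * pgd_base so that later __set_fixmap() calls only have to write
 * pte entries. No ptes are filled in here.
 */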
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

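/*
 * Allocate and minimally initialize the pglist_data for a node. With
 * CONFIG_NEED_MULTIPLE_NODES the structure itself is carved out of
 * memblock, preferring memory that lies below the node's end PFN.
 */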
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					SMP_CACHE_BYTES, memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

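/*
 * Register every memblock memory region as an active PFN range, bring
 * node 0 online, let the platform reserve or rearrange memory via
 * plat_mem_setup(), and then initialize the sparse memory model.
 */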
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
	}
	sparse_init();
}

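/*
 * Reserve memory that must never reach the page allocator: the kernel
 * image itself, everything below CONFIG_ZERO_PAGE_OFFSET, and any
 * initrd or crashkernel regions.
 */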
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (the first step was init_bootmem()) because it
	 * catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

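/*
 * Arch entry point for paging setup: finalize the memblock view of
 * memory, size the nodes and zones, reset swapper_pg_dir, and build
 * the page tables backing the fixmap range.
 */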
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * ptes will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

unsigned int mem_init_done = 0;

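/*
 * Release all memblock-managed free pages to the buddy allocator, set
 * up the CPU caches and the zero page, and report the final virtual
 * memory layout.
 */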
void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

void free_initmem(void)
{
	free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

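/*
 * Memory hotplug: all hot-added memory is onlined into ZONE_NORMAL on
 * node 0, so there is no zone or node placement to decide here.
 */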
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
	if (unlikely(ret))
		pr_warn("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
	if (unlikely(ret))
		pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
			ret);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */