/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

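/*
 * Default memory setup: register the platform's memory range with
 * memblock. Boards with more involved layouts override this through
 * the machvec mv_mem_init hook invoked from paging_init().
 */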
void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
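/*
 * Walk the kernel page tables down to the PTE for @addr, allocating
 * the intermediate pud/pmd levels as needed. Returns NULL if the pgd
 * entry is missing or an intermediate allocation fails.
 */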
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

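/*
 * Map @phys at kernel virtual address @addr with protection @prot,
 * flushing the stale TLB entry. Existing mappings are never
 * overwritten; _PAGE_WIRED mappings are additionally pinned into
 * the TLB.
 */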
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

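/*
 * Tear down a mapping previously established by set_pte_phys(),
 * unwiring any pinned TLB entry first.
 */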
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

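/*
 * Establish (and below, tear down) a fixmap mapping at the
 * compile-time-fixed virtual address for @idx; an out-of-range
 * index is a hard bug.
 */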
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

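/*
 * Instantiate the pmd page for @pud from bootmem if one is not
 * already present.
 */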
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = alloc_bootmem_pages(PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

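/*
 * Likewise, instantiate the pte page for @pmd from bootmem if one
 * is not already present.
 */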
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = alloc_bootmem_pages(PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

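/*
 * Hook for kmap PTE consistency checking; nothing is needed here,
 * so this stub simply passes the PTE through.
 */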
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

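/*
 * Pre-allocate page tables covering the kernel virtual range
 * [start, end) so that later fixmap users only ever need to fill
 * in PTEs, never allocate.
 */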
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

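/*
 * Allocate and minimally initialize the pglist_data for @nid. In
 * the non-NUMA case NODE_DATA(0) is statically allocated, so only
 * the node's PFN span needs filling in here.
 */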
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					SMP_CACHE_BYTES, memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

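/*
 * Set up bootmem for a single node: allocate its bootmap, release
 * the node's active regions to bootmem, then re-reserve everything
 * memblock has already claimed.
 */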
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;

	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}

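/*
 * Register all memblock memory as active regions, bring node 0
 * online, give the platform a chance at its own setup, and then
 * hand each online node over to bootmem.
 */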
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}

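/*
 * Reserve the regions that must never reach the allocators: the
 * kernel image itself, the pages below CONFIG_ZERO_PAGE_OFFSET,
 * the initrd, and any crash kernel.
 */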
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (the first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

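/*
 * Set up the kernel page tables and memory allocators: run the
 * early memblock reservations, initialize bootmem, prime
 * swapper_pg_dir for fixmap use, and size the zones.
 */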
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	memblock_init();
	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_analyze();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;

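/*
 * Release bootmem to the page allocator, initialize the zero page,
 * and report the final memory layout.
 */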
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

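/*
 * Hand the pages backing .init back to the page allocator now that
 * boot-time initialization is complete.
 */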
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
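/* Release the pages backing a no-longer-needed initrd image. */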
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
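/*
 * Memory hotplug entry point; all hotplugged memory is added to
 * ZONE_NORMAL.
 */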
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
				start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */