xref: /openbmc/linux/arch/sh/mm/init.c (revision e868d612)
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/quicklist.h>	/* quicklist_total_size() */
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* The kernel's reference page table, cleared and installed in paging_init(). */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * copy_page()/clear_page() dispatch through these pointers; mem_init()
 * points them at the slow generic versions, which may be overridden
 * later in boot if a faster implementation is available.
 */
void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);

void show_mem(void)
{
	int total = 0, reserved = 0, free = 0;
	int shared = 0, cached = 0, slab = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();

	for_each_online_pgdat(pgdat) {
		struct page *page, *end;
		unsigned long flags;

		pgdat_resize_lock(pgdat, &flags);
		page = pgdat->node_mem_map;
		end = page + pgdat->node_spanned_pages;

		/* Use a while loop so that a node with no spanned pages
		 * is skipped rather than having a bogus first page
		 * examined. */
		while (page < end) {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		}

		pgdat_resize_unlock(pgdat, &flags);
	}

	printk("Free swap:       %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk(KERN_INFO "Total of %lu pages in page table cache\n",
	       quicklist_total_size());
}

#ifdef CONFIG_MMU
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		/* pud is NULL here, so don't dereference it in the
		 * error path. */
		printk(KERN_ERR "%s: pud_alloc failed for 0x%08lx\n",
		       __func__, addr);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		/* Likewise, pmd is NULL and must not be dereferenced. */
		printk(KERN_ERR "%s: pmd_alloc failed for 0x%08lx\n",
		       __func__, addr);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap
 * mapping across a context switch. We don't presently do this, but it
 * could be done in a fashion similar to the wired TLB interface that
 * sh64 uses (by way of the memory-mapped UTLB configuration). This
 * unfortunately forces us to give up a TLB entry for each mapping we
 * want to preserve. While this may be viable for a small number of
 * fixmaps, it's not particularly useful for everything and needs to be
 * carefully evaluated (e.g., we may want this for the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can
 * pass in at __set_fixmap() time to determine the appropriate behavior
 * to follow. (A hypothetical sketch of this follows __set_fixmap()
 * below.)
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
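
/*
 * Hypothetical sketch only (compiled out): one way the _PAGE_WIRED idea
 * from the comment above might look. _PAGE_WIRED and tlb_wire_entry()
 * are assumed names that do not exist in this tree; a real version
 * would pin the translation in a reserved UTLB slot via the
 * memory-mapped configuration registers, much as sh64 does.
 */
#if 0
void __set_fixmap_wired(enum fixed_addresses idx, unsigned long phys,
			pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	/* Mark the PTE so the TLB miss handler knows not to evict it. */
	pgprot_val(prot) |= _PAGE_WIRED;
	set_pte_phys(address, phys, prot);

	/* Reserve a UTLB entry for this address (assumed helper). */
	tlb_wire_entry(address);
}
#endif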
#endif	/* CONFIG_MMU */

/* References to section boundaries */
extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
		low = pgdat->bdata->node_low_pfn;

		if (low > max_zone_pfns[ZONE_NORMAL])
			max_zone_pfns[ZONE_NORMAL] = low;

		add_active_range(nid, start_pfn, low);

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	/*
	 * free_area_init_nodes() initializes every online node, so call
	 * it once, after all nodes have registered their active ranges,
	 * rather than once per node.
	 */
	free_area_init_nodes(max_zone_pfns);

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);

		printk("Node %u: mem_map starts at %p\n",
		       pgdat->node_id, pgdat->node_mem_map);
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int nid;

	reservedpages = 0;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;
		int i;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		for (i = 0; i < node_pages; i++)
			if (PageReserved(pgdat->node_mem_map + i))
				reservedpages++;

		/* high_memory is a virtual address, so convert the end
		 * of the node's PFN range with __va(). */
		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
							PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/*
	 * Set up wrappers for copy/clear_page(); these will get overridden
	 * later in the boot process if a better method is available.
	 * (A compiled-out sketch of such an override follows this
	 * function.)
	 */
#ifdef CONFIG_MMU
	copy_page = copy_page_slow;
	clear_page = clear_page_slow;
#else
	copy_page = copy_page_nommu;
	clear_page = clear_page_nommu;
#endif

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk reserved, %dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		totalram_pages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}
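
/*
 * Hypothetical sketch only (compiled out): how a later boot stage might
 * replace the slow copy_page/clear_page wrappers installed above once a
 * faster CPU-specific routine is known to be safe to use. The probe and
 * the copy_page_fast/clear_page_fast names are assumptions made for
 * illustration; they are not functions in this tree.
 */
#if 0
static void __init select_page_ops(void)
{
	if (cpu_has_fast_page_ops()) {		/* assumed probe */
		copy_page = copy_page_fast;	/* hypothetical fast path */
		clear_page = clear_page_fast;
	}
}
#endif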

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %dk freed\n",
	       (int)(&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %luk freed\n", (end - start) >> 10);
}
#endif
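
/*
 * Hypothetical sketch only (compiled out): free_initmem() and
 * free_initrd_mem() above share the same release-one-page idiom, which
 * could be factored into a helper like this. free_reserved_range() is
 * an assumed name, not a function in this tree.
 */
#if 0
static unsigned long free_reserved_range(unsigned long start,
					 unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}

	return (end - start) >> 10;	/* kilobytes freed */
}
#endif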