/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/quicklist.h>	/* quicklist_total_size(), used by show_mem() */
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

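/* Per-CPU batching state for the generic mmu_gather TLB shootdown code. */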
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
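/*
 * Offset added to a cached (P1) kernel address to reach its uncached
 * alias. Non-zero only when such a fixed translation exists; see
 * paging_init() below, where 29-bit mode sets this to P2SEG - P1SEG.
 */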
unsigned long cached_to_uncached = 0;

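/* Dump a summary of page usage across each online node, for debugging. */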
void show_mem(void)
{
	int total = 0, reserved = 0, free = 0;
	int shared = 0, cached = 0, slab = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();

	for_each_online_pgdat(pgdat) {
		unsigned long flags, i;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk(KERN_INFO "Total of %lu pages in page table cache\n",
	       quicklist_total_size());
}

#ifdef CONFIG_MMU
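/*
 * Install a single kernel PTE mapping 'phys' at virtual address 'addr'
 * with protection 'prot'. The pgd entry must already be present;
 * intermediate levels are allocated as needed. Used by __set_fixmap().
 */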
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		/* pud is NULL on failure, so it must not be dereferenced */
		printk(KERN_ERR "%s: pud allocation failed\n", __func__);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		/* likewise, pmd is NULL here */
		printk(KERN_ERR "%s: pmd allocation failed\n", __func__);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	if (cached_to_uncached)
		flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap
 * mapping across a context switch. We don't presently do this, but it
 * could be done in a similar fashion to the wired TLB interface that
 * sh64 uses (by way of the memory mapped UTLB configuration). This
 * unfortunately forces us to give up a TLB entry for each mapping we
 * want to preserve. While this may be viable for a small number of
 * fixmaps, it's not particularly useful for everything, and needs to be
 * carefully evaluated. (i.e., we may want this for the vsyscall page.)
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can
 * pass in at __set_fixmap() time to determine the appropriate behavior
 * to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

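/*
 * Pre-allocate page tables covering [start, end) so that entries in
 * that range (e.g. the fixmap) can later be set without allocating.
 * Only the pmd/pte levels are populated here; the pgd entries are
 * expected to already be present.
 */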
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start & PMD_MASK;
	end = (end + PMD_SIZE - 1) & PMD_MASK;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		BUG_ON(pgd_none(*pgd));
		pud = pud_offset(pgd, 0);
		BUG_ON(pud_none(*pud));
		pmd = pmd_offset(pud, 0);

		if (!pmd_present(*pmd)) {
			pte_t *pte_table;
			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			memset(pte_table, 0, PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte_table);
		}

		vaddr += PMD_SIZE;
	}
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/* Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap(). */
	page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

#ifdef CONFIG_SUPERH32
	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));

#ifdef CONFIG_29BIT
	/*
	 * Handle trivial transitions between cached and uncached
	 * segments, making use of the 1:1 mapping relationship in
	 * 512MB lowmem.
	 */
	cached_to_uncached = P2SEG - P1SEG;
#endif
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;
int after_bootmem = 0;

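/*
 * Release all bootmem to the page allocator, establish high_memory and
 * the /proc/kcore lists, and report the final memory figures.
 */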
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	after_bootmem = 1;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}

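/*
 * Return the memory occupied by __init code and data to the page
 * allocator once it is no longer needed.
 */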
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
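/* Hand the pages that backed the initrd image back to the page allocator. */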
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %luk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
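/*
 * Memory hotplug entry point: hand a newly added physical range over to
 * the core VM. Everything goes into ZONE_NORMAL, the only zone we have.
 */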
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
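/* Map a hotplugged physical address to its NUMA node. */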
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif