/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);

void show_mem(void)
{
	int total = 0, reserved = 0, free = 0;
	int shared = 0, cached = 0, slab = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();

	for_each_online_pgdat(pgdat) {
		unsigned long flags, i;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
}

#ifdef CONFIG_MMU
/*
 * Install a single kernel PTE mapping the virtual address 'addr' to the
 * physical address 'phys' with protection 'prot', then flush the TLB
 * entry for the current ASID so the new mapping takes effect. Used by
 * __set_fixmap() below.
 */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch; we don't presently do this, but it could be done
 * in a similar fashion to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated (i.e., we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
#endif	/* CONFIG_MMU */
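
/*
 * Illustrative sketch of the _PAGE_WIRED idea floated above, kept out of
 * the build with #if 0: the _PAGE_WIRED bit and the __set_fixmap_wired()
 * helper are hypothetical names, not an existing interface. The point is
 * only to show how a protection-bit flag passed through __set_fixmap()
 * could tell the TLB code to load the mapping into a wired UTLB slot.
 */
#if 0
void __set_fixmap_wired(enum fixed_addresses idx, unsigned long phys,
			pgprot_t prot)
{
	/* OR in the (hypothetical) wired bit so set_pte_phys() or the TLB
	 * miss handler can treat this entry as one to preserve across
	 * context switches. */
	__set_fixmap(idx, phys, __pgprot(pgprot_val(prot) | _PAGE_WIRED));
}
#endif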

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}
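
/*
 * Background sketch for the P1 remark in paging_init() above, kept out of
 * the build with #if 0: on sh, the kernel runs from the P1 segment, a
 * fixed, cacheable, untranslated window onto physical memory, which is why
 * no TLB entries (and hence no swapper_pg_dir entries) are needed for the
 * kernel image itself. The phys_to_p1() name below is hypothetical and
 * assumes the usual 32-bit layout with PAGE_OFFSET at the base of P1.
 */
#if 0
static inline void *phys_to_p1(unsigned long phys)
{
	/* P1 virtual address = physical address + PAGE_OFFSET; this is
	 * essentially what __va() expands to for lowmem here. */
	return (void *)(phys + PAGE_OFFSET);
}
#endif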

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/*
	 * Set up wrappers for copy/clear_page(); these will get overridden
	 * later in the boot process if a better method is available.
	 */
#ifdef CONFIG_MMU
	copy_page = copy_page_slow;
	clear_page = clear_page_slow;
#else
	copy_page = copy_page_nommu;
	clear_page = clear_page_nommu;
#endif

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}
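
/*
 * Illustrative sketch of the override mentioned in mem_init() above, kept
 * out of the build with #if 0: later boot code can simply repoint the
 * copy_page/clear_page function pointers at a faster, CPU-specific
 * implementation. cpu_has_fast_page_ops(), copy_page_fast() and
 * clear_page_fast() are hypothetical names used only to show the pattern.
 */
#if 0
static void __init install_fast_page_ops(void)
{
	if (!cpu_has_fast_page_ops())
		return;

	copy_page = copy_page_fast;
	clear_page = clear_page_fast;
}
#endif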

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __FUNCTION__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif