xref: /openbmc/linux/arch/powerpc/mm/mem.c (revision 8fa5723aa7e053d498336b48448b292fc2e0458b)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

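/*
 * Walk the kernel page tables (pgd -> pud -> pmd) to find the pte that
 * maps a kernel virtual address.  Only valid for addresses whose kernel
 * page table entries have already been established.
 */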
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

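/*
 * Return 1 if the given pfn is backed by system RAM.  On 32-bit this is
 * a simple comparison against max_pfn; on 64-bit we walk the lmb memory
 * regions looking for one that covers the address.
 */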
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	int i;
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
			(paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}

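/*
 * Choose the page protection for a mapping of physical memory (for
 * example via /dev/mem).  Platforms may override this through ppc_md;
 * otherwise any non-RAM range is mapped guarded and non-cacheable.
 */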
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
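
/*
 * Hypothetical sketch (disabled, not part of this file): how a character
 * driver's mmap method might apply phys_mem_access_prot() before
 * remapping physical pages, in the style of /dev/mem.  "foo_mmap" is an
 * invented name for illustration only.
 */
#if 0
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Pick up guarded/no-cache attributes for non-RAM ranges */
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size, vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
#endif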

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
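/* Resolve the NUMA node that a hot-added physical address belongs to. */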
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

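/*
 * Memory-hotplug entry point: extend the kernel linear mapping to cover
 * the new section, then hand its pages to the zone allocator.
 */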
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in lmb.memory structures.  Walk through the
 * memory regions, find holes and invoke the callback for each contiguous
 * region.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *))
{
	struct lmb_property res;
	unsigned long pfn, len;
	u64 end;
	int ret = -1;

	res.base = (u64) start_pfn << PAGE_SHIFT;
	res.size = (u64) nr_pages << PAGE_SHIFT;

	end = res.base + res.size - 1;
	while ((res.base < end) && (lmb_find(&res) >= 0)) {
		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
		len = (unsigned long)(res.size >> PAGE_SHIFT);
		ret = (*func)(pfn, len, arg);
		if (ret)
			break;
		res.base += (res.size + 1);
		res.size = (end - res.base + 1);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_memory_resource);
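
/*
 * Illustrative sketch of a walk_memory_resource() caller, kept disabled.
 * The callback and helper names below are hypothetical, not kernel APIs:
 * the walk invokes the callback once per hole-free chunk of the range,
 * and a non-zero return stops it early.
 */
#if 0
static int count_chunk_pages(unsigned long start_pfn, unsigned long nr_pages,
			     void *arg)
{
	unsigned long *total = arg;	/* accumulate pages seen so far */

	*total += nr_pages;
	return 0;			/* keep walking */
}

static unsigned long count_ram_pages(unsigned long start_pfn,
				     unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_memory_resource(start_pfn, nr_pages, &total, count_chunk_pages);
	return total;
}
#endif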

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT,
					 min_low_pfn, max_low_pfn);

	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long addr = lmb.reserved.region[i].base +
				     lmb_size_bytes(&lmb.reserved, i) - 1;
		if (addr < lowmem_end_addr)
			reserve_bootmem(lmb.reserved.region[i].base,
					lmb_size_bytes(&lmb.reserved, i),
					BOOTMEM_DEFAULT);
		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
			unsigned long adjusted_size = lowmem_end_addr -
				      lmb.reserved.region[i].base;
			reserve_bootmem(lmb.reserved.region[i].base,
					adjusted_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i),
				BOOTMEM_DEFAULT);

#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

/*
 * Hibernation: mark the holes between lmb memory regions as nosave,
 * since pages that don't exist can't be saved or restored.
 */
static int __init mark_nonram_nosave(void)
{
	unsigned long lmb_next_region_start_pfn,
		      lmb_region_max_pfn;
	int i;

	for (i = 0; i < lmb.memory.cnt - 1; i++) {
		lmb_region_max_pfn =
			(lmb.memory.region[i].base >> PAGE_SHIFT) +
			(lmb.memory.region[i].size >> PAGE_SHIFT);
		lmb_next_region_start_pfn =
			lmb.memory.region[i+1].base >> PAGE_SHIFT;

		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
			register_nosave_region(lmb_region_max_pfn,
					       lmb_next_region_start_pfn);
	}

	return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	phys_addr_t top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

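/*
 * Late memory-management init: release bootmem pages to the buddy
 * allocator, count reserved pages, free highmem (if configured) and
 * print the memory summary.
 */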
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);
			if (lmb_is_reserved(pfn << PAGE_SHIFT))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

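/*
 * Write a page's d-cache back to memory and invalidate the i-cache for
 * it, using whichever variant (mapped, linear, or physical address) the
 * platform requires.
 */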
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* No need to kmap on 8xx or 64-bit, since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero-filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

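/*
 * Make the i-cache coherent for a range within a user page, e.g. after
 * the kernel has written instructions into it on behalf of ptrace.
 */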
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
		/* On 8xx, cache control instructions (particularly
		 * "dcbst" from flush_dcache_icache) fault as write
		 * operations if there is an unpopulated TLB entry
		 * for the address in question. To work around that,
		 * we invalidate the TLB here, thus avoiding dcbst
		 * misbehaviour.
		 */
		_tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
		/* The _PAGE_USER test should really be _PAGE_EXEC, but
		 * older glibc versions execute some code from no-exec
		 * pages, which for now we are supporting.  If exec-only
		 * pages are ever implemented, this will have to change.
		 */
		if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm)
				__flush_dcache_icache((void *) address);
			else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)		/* instruction access fault */
		access |= _PAGE_EXEC;
	else if (trap != 0x300)		/* anything but a data access fault */
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}
530