xref: /openbmc/linux/arch/powerpc/mm/mem.c (revision 4a44a19b)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
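
/*
 * Illustrative use (an assumption, not part of this file): a driver's
 * mmap() hook would typically apply this helper before remapping
 * physical pages into userspace, so non-RAM ranges end up non-cached:
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */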

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	if (create_section_mapping(start, start + size))
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
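
/*
 * Sketch of the call path (an assumption, not from this file): the
 * generic hotplug core reaches arch_add_memory() via add_memory(), for
 * example when a memory DIMM is probed or hot-added:
 *
 *	add_memory(nid, start, size)
 *	    -> arch_add_memory(nid, start, size)
 */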

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (!ret && (ppc_md.remove_memory))
		ret = ppc_md.remove_memory(start, size);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes and invoke the callback on each contiguous
 * region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
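
/*
 * Illustrative caller (an assumption, not in this file): counting the RAM
 * pages in a range by accumulating the nr_pages handed to the callback:
 *
 *	static int count_ram_pages(unsigned long start_pfn,
 *				   unsigned long nr_pages, void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(0, max_pfn, &total, count_ram_pages);
 */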

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	struct memblock_region *reg;
	int boot_mapsize;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
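	/*
	 * Worked example (illustrative, not from the original source): with
	 * 512MB of lowmem and 4K pages, total_pages = 131072, so the bitmap
	 * needs 131072 / 8 = 16KB, i.e. bootmem_bootmap_pages() asks for
	 * 4 pages.
	 */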
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;
		if (top < lowmem_end_addr)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		else if (reg->base < lowmem_end_addr) {
			unsigned long trunc_size = lowmem_end_addr - reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg)
		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

/* mark pages that don't exist as nosave */
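/*
 * Worked example (illustrative, not from the original source): given
 * memory regions [0, 1G) and [2G, 3G), the loop below registers the pfn
 * range covering the [1G, 2G) hole as nosave, so hibernation does not
 * try to save pages that do not exist.
 */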
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0 ... MAX_NR_ZONES - 1] = ~0UL
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}
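
/*
 * Illustrative call (an assumption, not from this file): a platform whose
 * DMA engine can only reach the first 2GB could clamp ZONE_DMA during
 * early setup:
 *
 *	limit_zone_pfn(ZONE_DMA, 1UL << (31 - PAGE_SHIFT));
 */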

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	enum zone_type top_zone = ZONE_NORMAL;
	int i;

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
#endif

	for (i = top_zone; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}
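
/*
 * Illustrative use (an assumption, not from this file): DMA setup code
 * can map a device's coherent DMA mask to the widest zone that fits
 * entirely under it:
 *
 *	u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
 *	int zone = dma_pfn_limit_to_zone(pfn);
 *
 * and fall back to a default zone when the return value is < 0.
 */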

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	enum zone_type top_zone;

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#else
	top_zone = ZONE_NORMAL;
#endif

	limit_zone_pfn(top_zone, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

static void __init register_page_bootmem_info(void)
{
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	register_page_bootmem_info();
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU-up
	 * functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

	mem_init_done = 1;
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#ifdef CONFIG_BOOKE
	{
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx and 64-bit there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
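	/*
	 * Trap 0x400 is an instruction storage fault (exec); 0x300 is a
	 * data storage fault.
	 */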
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);
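
/*
 * Illustrative result (an assumption about typical output, not from this
 * file): after this initcall runs, each memblock region appears in
 * /proc/iomem, e.g.
 *
 *	00000000-1fffffff : System RAM
 */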

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
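/*
 * Illustrative caller (an assumption, not part of this file):
 * drivers/char/mem.c vets each page of a /dev/mem access roughly as:
 *
 *	if (!devmem_is_allowed(pfn))
 *		return -EPERM;
 *
 * so a zero return here denies the access.
 */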
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	if (page_is_rtas_user_buf(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
622