xref: /openbmc/linux/arch/powerpc/mm/mem.c (revision 217188d9)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

/* Walk the kernel page tables down to the PTE that maps vaddr. */
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

int page_is_ram(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
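
/*
 * Illustrative only: a minimal sketch (not part of this file) of how a
 * character driver's mmap handler might use phys_mem_access_prot() to
 * pick cacheability before remapping; example_mmap is a hypothetical
 * name.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = vma->vm_pgoff;
	unsigned long size = vma->vm_end - vma->vm_start;

	/* non-RAM pfns come back with a non-cached protection */
	vma->vm_page_prot = phys_mem_access_prot(file, pfn, size,
						 vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}
#endif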

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
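
/*
 * Illustrative only: hot-added memory normally enters through the
 * generic add_memory() path, which ends up calling arch_add_memory().
 * A minimal sketch, assuming a hypothetical probe handler supplies the
 * physical address and size:
 */
#if 0
	rc = add_memory(nid, phys_start, size);
	if (rc)
		pr_err("hot add failed: %d\n", rc);
#endif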

#ifdef CONFIG_MEMORY_HOTREMOVE
int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory.
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem.  Instead it maintains it in memblock.memory structures.
 * Walk through the memory regions, find holes, and invoke the callback
 * for each contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
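
/*
 * Illustrative only: a minimal sketch of a walk_system_ram_range()
 * caller.  The callback gets a start pfn and a page count for each
 * contiguous span of RAM; count_ram_pages is a hypothetical name.
 */
#if 0
static int count_ram_pages(unsigned long start_pfn, unsigned long nr_pages,
			   void *arg)
{
	*(unsigned long *)arg += nr_pages;
	return 0;	/* non-zero would stop the walk */
}

static unsigned long total_ram_pages_example(void)
{
	unsigned long total = 0;

	walk_system_ram_range(0, max_pfn, &total, count_ram_pages);
	return total;
}
#endif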

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions.
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0            ... TOP_ZONE        ] = ~0UL,
	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	int i;

	for (i = TOP_ZONE; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}
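
/*
 * Illustrative only: a hedged sketch of the intended calling pattern.
 * A platform whose DMA engines address only 31 bits could cap ZONE_DMA
 * during early setup (before paging_init()), then map a device's pfn
 * limit back to a zone at probe time.  The 31-bit limit is a made-up
 * example and assumes CONFIG_ZONE_DMA.
 */
#if 0
	/* in platform setup code: */
	limit_zone_pfn(ZONE_DMA, 1UL << (31 - PAGE_SHIFT));

	/* later, at device probe time: */
	int zone = dma_pfn_limit_to_zone(dev_pfn_limit);
	if (zone < 0)
		return -EIO;	/* no zone fits below the device's limit */
#endif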

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU
	 * bring-up functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx and 64-bit there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
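
/*
 * Illustrative only: the typical caller is copy_to_user_page() (used by
 * ptrace/access_process_vm when writing breakpoints), which pairs the
 * memcpy of new instructions with an icache flush of just those bytes:
 */
#if 0
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
#endif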

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or the ptl lock held.
	 */
	unsigned long access, trap;

	if (radix_enabled()) {
		prefetch((void *)address);
		return;
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/*
	 * We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:		/* data access */
		access = 0UL;
		break;
	case 0x400:		/* instruction access */
		access = _PAGE_EXEC;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}
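
/*
 * Illustrative only: generic mm invokes this hook right after a fault
 * handler installs or updates a PTE, roughly:
 */
#if 0
	set_pte_at(vma->vm_mm, address, ptep, entry);
	update_mmu_cache(vma, address, ptep);
#endif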

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);
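
/*
 * Illustrative only: with the resources registered above, each memblock
 * shows up in /proc/iomem as a "System RAM" span, e.g. (made-up range):
 *
 *	00000000-7fffffff : System RAM
 */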

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid.  The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain
 * the PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
600