xref: /openbmc/linux/arch/powerpc/mm/mem.c (revision a5a92abb)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

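/*
 * Walk the kernel page tables (pgd -> pud -> pmd) and return a pointer
 * to the PTE that maps the given kernel virtual address.
 */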
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

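/*
 * Return non-zero if the page frame is backed by RAM, i.e. the
 * corresponding physical address lies within a memblock memory region.
 */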
int page_is_ram(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}

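/*
 * Determine the page protection to use when userspace maps physical
 * memory (e.g. via /dev/mem).  Platforms may override this with
 * ppc_md.phys_mem_access_prot; otherwise non-RAM ranges are mapped
 * non-cached.
 */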
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

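/*
 * Default (weak) implementations of the section-mapping hooks; MMU
 * backends that support memory hotplug provide real versions.
 */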
int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

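/*
 * Hot-add @size bytes of memory at physical address @start: resize the
 * hash page table if necessary, map the new range into the linear
 * mapping, then hand the pages over to the generic mm code.
 */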
int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
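/*
 * Tear down hot-removed memory: release the pages from the generic mm,
 * remove the bolted linear-mapping entries, flush any vmalloc aliases,
 * and shrink the hash page table to match the new memory size.
 */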
int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory.
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes, and invoke the callback for each contiguous
 * region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
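
/*
 * Hypothetical usage sketch (count_ram_pages is not part of this file):
 * a caller that tallies the RAM pages in a range could look like this:
 *
 *	static int count_ram_pages(unsigned long pfn, unsigned long nr,
 *				   void *arg)
 *	{
 *		*(unsigned long *)arg += nr;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(0, max_pfn, &total, count_ram_pages);
 *
 * The callback receives (start_pfn, nr_pages) for each contiguous
 * region, and a non-zero return value stops the walk.
 */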

#ifndef CONFIG_NEED_MULTIPLE_NODES
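/*
 * Non-NUMA flavour: derive the min/max PFNs from memblock and place
 * all of memory in node 0.
 */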
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions.
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* Mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0            ... TOP_ZONE        ] = ~0UL,
	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	int i;

	for (i = TOP_ZONE; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}

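/*
 * Illustrative sketch (hypothetical platform code, not part of this
 * file): a platform whose DMA engine can only address 2GB could cap
 * ZONE_DMA before paging_init() and later map a device's DMA limit
 * back to a zone:
 *
 *	limit_zone_pfn(ZONE_DMA, 1UL << (31 - PAGE_SHIFT));
 *	...
 *	int zone = dma_pfn_limit_to_zone(dma_mask >> PAGE_SHIFT);
 *	if (zone < 0)
 *		... no zone fits entirely below the device's limit ...
 */
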
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

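/*
 * Late memory-management initialisation: release bootmem to the buddy
 * allocator, free any highmem pages, and report the kernel virtual
 * memory layout.
 */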
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU
	 * bring-up functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

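/*
 * Free the kernel's init sections once boot is complete, after marking
 * them non-executable.
 */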
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

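/*
 * Flush the data cache to memory and invalidate the corresponding
 * instruction cache lines for a page, mapping the page temporarily
 * first where it might live in highmem.
 */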
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx and 64-bit there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

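/*
 * Flush the instruction cache for a range of a user page after the
 * kernel has written to it (e.g. on behalf of ptrace), using a
 * temporary kernel mapping.
 */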
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access, trap;

	if (radix_enabled())
		return;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/*
	 * We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs being NULL since init will get here first thing at
	 * boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:
		access = 0UL;
		break;
	case 0x400:
		access = _PAGE_EXEC;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-RAM areas as well; these contain the
 * PCI MMIO resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
594