// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>
#include <linux/dma-direct.h>
#include <linux/kprobes.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
#endif

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
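
/*
 * Usage sketch (illustrative only, not part of this file): a character
 * driver's mmap() path could use this hook to pick cacheability for a
 * physical mapping, much as /dev/mem does. All names below (example_mmap)
 * are hypothetical.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = vma->vm_pgoff;
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Non-RAM targets come back with a non-cached pgprot. */
	vma->vm_page_prot = phys_mem_access_prot(file, pfn, size,
						 vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}
#endif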

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to @chunk bytes.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}
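
/*
 * Usage sketch (illustrative only): flushing a large range in 1G chunks
 * so the cond_resched() in each iteration keeps the CPU responsive.
 * start and size below are hypothetical.
 */
#if 0
	unsigned long vstart = (unsigned long)__va(start);

	flush_dcache_range_chunked(vstart, vstart + size, FLUSH_CHUNK_SIZE);
#endif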

int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, params);
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			     struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	__remove_pages(start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);

	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
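
/*
 * Worked example (illustrative): with memory pfn ranges [0, 0x1000) and
 * [0x3000, 0x5000), the loop above registers the hole [0x1000, 0x3000)
 * as a nosave region so hibernation does not try to save it.
 */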
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code.  32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];
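
/*
 * Illustrative sketch only: since there is no ZONE_DMA32 here, a GFP_DMA32
 * allocation is served from ZONE_NORMAL when possible and otherwise falls
 * back into the 31-bit ZONE_DMA, per the comment above.
 */
#if 0
	struct page *page = alloc_page(GFP_DMA32);	/* may land in ZONE_DMA */
#endif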

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, there is no need to
	 * reset it back to top-down.
	 */
	memblock_set_bottom_up(true);
	if (is_secure_guest())
		svm_swiotlb_init();
	else
		swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

/**
 * flush_coherent_icache() - if a CPU has a coherent icache, flush it
 * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
 *
 * Return: true if the cache was flushed, false otherwise
 */
static inline bool flush_coherent_icache(unsigned long addr)
{
	/*
	 * For a snooping icache, we still need a dummy icbi to purge all the
	 * prefetched instructions from the ifetch buffers. We also need a sync
	 * before the icbi to order the actual stores to memory that might
	 * have modified instructions with the icbi.
	 */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
		mb(); /* sync */
		allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
		icbi((void *)addr);
		prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
		mb(); /* sync */
		isync();
		return true;
	}

	return false;
}

/**
 * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static void invalidate_icache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_icache_shift();
	unsigned long bytes = l1_icache_bytes();
	char *addr = (char *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		icbi(addr);

	mb(); /* sync */
	isync();
}
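
/*
 * Worked example for invalidate_icache_range() above (illustrative):
 * with 64-byte icache lines (shift = 6, bytes = 0x40), start = 0x1050
 * and stop = 0x10e0 give addr = 0x1040 and size = 0xa0 + 0x3f = 0xdf,
 * so size >> shift = 3 icbi's hit lines 0x1040, 0x1080 and 0x10c0,
 * covering all of [0x1050, 0x10e0).
 */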

/**
 * flush_icache_range() - Write any modified data cache blocks out to memory
 * and invalidate the corresponding blocks in the instruction cache
 * @start: the start address
 * @stop: the stop address (exclusive)
 *
 * Generic code will call this after writing memory, before executing from it.
 */
void flush_icache_range(unsigned long start, unsigned long stop)
{
	if (flush_coherent_icache(start))
		return;

	clean_dcache_range(start, stop);

	if (IS_ENABLED(CONFIG_44x)) {
		/*
		 * Flash invalidate on 44x because we are passed kmapped
		 * addresses and this doesn't work for userspace pages due to
		 * the virtually tagged icache.
		 */
		iccci((void *)start);
		mb(); /* sync */
		isync();
	} else
		invalidate_icache_range(start, stop);
}
EXPORT_SYMBOL(flush_icache_range);
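
/*
 * Usage sketch (illustrative only): the patch-then-flush sequence generic
 * code performs before executing freshly written instructions. dest, insns
 * and len below are hypothetical.
 */
#if 0
	memcpy(dest, insns, len);		/* modify instructions */
	flush_icache_range((unsigned long)dest, (unsigned long)dest + len);
	/* dest is now safe to execute from */
#endif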

#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
/**
 * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
	unsigned long bytes = l1_dcache_bytes();
	unsigned long nb = PAGE_SIZE / bytes;
	unsigned long addr = physaddr & PAGE_MASK;
	unsigned long msr, msr0;
	unsigned long loop1 = addr, loop2 = addr;

	msr0 = mfmsr();
	msr = msr0 & ~MSR_DR;
	/*
	 * This must remain as ASM to prevent potential memory accesses
	 * while the data MMU is disabled
	 */
	asm volatile(
		"   mtctr %2;\n"
		"   mtmsr %3;\n"
		"   isync;\n"
		"0: dcbst   0, %0;\n"
		"   addi    %0, %0, %4;\n"
		"   bdnz    0b;\n"
		"   sync;\n"
		"   mtctr %2;\n"
		"1: icbi    0, %1;\n"
		"   addi    %1, %1, %4;\n"
		"   bdnz    1b;\n"
		"   sync;\n"
		"   mtmsr %5;\n"
		"   isync;\n"
		: "+&r" (loop1), "+&r" (loop2)
		: "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
		: "ctr", "memory");
}
NOKPROBE_SYMBOL(flush_dcache_icache_phys)
#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* No need to kmap on 8xx or 64-bit: highmem is not supported there */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;

		if (flush_coherent_icache(addr))
			return;
		flush_dcache_icache_phys(addr);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

/**
 * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 * @p: the address of the page to flush
 */
void __flush_dcache_icache(void *p)
{
	unsigned long addr = (unsigned long)p;

	if (flush_coherent_icache(addr))
		return;

	clean_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * We don't flush the icache on 44x. Those have a virtual icache and we
	 * don't have access to the virtual address here (it's not the page
	 * vaddr but where it's mapped in user space). The flushing of the
	 * icache on these is handled elsewhere, when a change in the address
	 * space occurs, before returning to user space.
	 */

	if (cpu_has_feature(MMU_FTR_TYPE_44x))
		return;

	invalidate_icache_range(addr, addr + PAGE_SIZE);
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);
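
/*
 * Example (illustrative): a memblock range [0x0, 0x80000000) becomes a
 * "System RAM" resource covering 00000000-7fffffff in /proc/iomem; the
 * exclusive memblock end 0x80000000 becomes the inclusive resource end
 * 0x7fffffff.
 */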

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);