xref: /openbmc/linux/arch/powerpc/mm/mem.c (revision 8bdc2a19)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>

#include <asm/swiotlb.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>

#include <mm/mmu_decl.h>

unsigned long long memory_limit;

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
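/*
 * Illustration (editor's sketch, not part of this file): a minimal
 * example of how a character driver's mmap handler could apply
 * phys_mem_access_prot() before remapping physical pages into user
 * space, so non-RAM pages end up mapped non-cached.
 * "example_phys_mmap" is hypothetical; phys_mem_access_prot() and
 * remap_pfn_range() are the only real APIs used.
 */
#if 0	/* example only, never compiled */
static int example_phys_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	/* Pick cached vs. non-cached protection based on the target pfn. */
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size, vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
#endif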

#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}

void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory.
	 */
	vm_unmap_aliases();
}

int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = arch_create_linear_mapping(nid, start, size, params);
	if (rc)
		return rc;
	rc = __add_pages(nid, start_pfn, nr_pages, params);
	if (rc)
		arch_remove_linear_mapping(start, size);
	return rc;
}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
#endif
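
/*
 * Illustration (editor's sketch, not part of this file): hotplug
 * drivers do not call arch_add_memory() directly; they go through the
 * generic add_memory() interface, which lands in the arch hooks above.
 * A hedged sketch assuming a section-aligned, made-up address range;
 * add_memory() and MHP_NONE are real, "example_hot_add" is not.
 */
#if 0	/* example only, never compiled */
static int example_hot_add(int nid)
{
	u64 start = 0x100000000ULL;	/* hypothetical, section-aligned */
	u64 size  = 0x10000000ULL;	/* 256 MB */

	/*
	 * add_memory() eventually calls arch_add_memory(), which first
	 * wires up the linear mapping and then creates the memmap via
	 * __add_pages().
	 */
	return add_memory(nid, start, size, MHP_NONE);
}
#endif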

#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions.
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif
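
/*
 * Worked example (editor's illustration): with RAM pfn ranges
 * [0x0, 0x40000) and [0x80000, 0xc0000), the non-NUMA
 * mark_nonram_nosave() above reaches the second range with
 * prev == 0x40000 and registers the hole [0x40000, 0x80000) as a
 * nosave region, so hibernation does not try to save pages that do
 * not exist.
 */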

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code.  32-bit-only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];
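
/*
 * Worked example (editor's illustration): with zone_dma_bits == 31 and
 * a 4K PAGE_SIZE (PAGE_SHIFT == 12), ZONE_DMA ends at pfn
 * 1UL << (31 - 12) == 0x80000, i.e. the first 2 GB of RAM; everything
 * between that and max_low_pfn lands in ZONE_NORMAL.
 */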

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);

			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#ifdef MODULES_VADDR
	pr_info("  * 0x%08lx..0x%08lx  : modules\n",
		MODULES_VADDR, MODULES_END);
#endif
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);
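
/*
 * Illustration (editor's note): on a machine with 2 GB of RAM starting
 * at address 0, the resource registered above would appear in
 * /proc/iomem as something like:
 *
 *	00000000-7fffffff : System RAM
 *
 * The exact ranges are platform-dependent; this only sketches the
 * resource-tree format that tools such as kdump consume.
 */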

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI MMIO resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
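
/*
 * Summary (editor's note) of the policy above, in order of precedence:
 * RTAS user buffers are always allowed, exclusive iomem regions are
 * always denied, other non-RAM (e.g. MMIO) is allowed, and kernel RAM
 * is denied.
 */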

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);