// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c":
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>

#include <asm/swiotlb.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>
#include <asm/ftrace.h>
#include <asm/code-patching.h>
#include <asm/setup.h>

#include <mm/mmu_decl.h>

unsigned long long memory_limit;

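/*
 * Shared page of zeroes: the generic MM maps this read-only via ZERO_PAGE()
 * to satisfy anonymous read faults without allocating a fresh page.
 */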
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
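
/*
 * Illustrative use (a sketch of the /dev/mem mmap path, not code from this
 * file): the character device layer asks the architecture how a physical
 * range should be mapped, roughly
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 *
 * so mappings of non-RAM ranges (e.g. MMIO) come back non-cached.
 */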

#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

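/*
 * Weak fallbacks for MMUs without hotplug support; hash and radix on
 * book3s64 provide real implementations, everything else fails the
 * hotplug request with -ENODEV.
 */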
int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}

void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}

/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
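/*
 * Worked example (hypothetical numbers): hot-adding 1GiB at 0x80000000 with
 * 4KiB pages gives end_pfn = PFN_UP(0xc0000000) = 0xc0000; if that is beyond
 * the current max_pfn, the pfn limits move up and high_memory is pointed at
 * the direct-map address just past the new last byte.
 */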
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

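/*
 * Wrapper around the generic __add_pages(): create the memmap for the new
 * range, then push max_pfn/max_low_pfn/high_memory out to cover it.
 */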
int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		    struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		return ret;

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

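/*
 * Hotplug entry point.  The linear mapping must be in place before
 * add_pages() touches the new range's memmap, so create it first and tear
 * it down again if adding the pages fails.
 */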
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = arch_create_linear_mapping(nid, start, size, params);
	if (rc)
		return rc;
	rc = add_pages(nid, start_pfn, nr_pages, params);
	if (rc)
		arch_remove_linear_mapping(start, size);
	return rc;
}

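/* Teardown is the mirror image: remove the pages, then the linear mapping. */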
void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
#endif

#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code.  32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

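	/*
	 * Example arithmetic: with 4KiB pages (PAGE_SHIFT = 12) and
	 * zone_dma_bits = 31, ZONE_DMA ends at pfn 1UL << 19 = 0x80000,
	 * i.e. the first 2GiB of RAM (or all of it, if there is less).
	 */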
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);

			if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#ifdef MODULES_VADDR
	pr_info("  * 0x%08lx..0x%08lx  : modules\n",
		MODULES_VADDR, MODULES_END);
#endif
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
	ftrace_free_init_tramp();
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
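/*
 * Each memblock range then shows up as a "System RAM" line in /proc/iomem,
 * e.g. (hypothetical layout):
 *
 *	00000000-7fffffff : System RAM
 */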
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
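/*
 * Returns 1 (allowed) for the RTAS user buffer and for non-RAM pfns such as
 * MMIO, 0 for exclusive iomem regions and for kernel RAM.
 */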
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);