// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
#include <asm/numa.h>

#include "../kernel/head.h"

/* Virtual address the kernel is mapped at (may differ from PAGE_OFFSET on 64-bit). */
unsigned long kernel_virt_addr = KERNEL_LINK_ADDR;
EXPORT_SYMBOL(kernel_virt_addr);

/* Shared all-zeroes page handed out for read faults on anonymous mappings. */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;

/*
 * Page-table allocation callbacks. Three implementations are installed over
 * the course of boot: "early" (MMU off), "fixmap" (MMU on, memblock-backed)
 * and "late" (buddy allocator available).
 */
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

/* Upper bound of the ZONE_DMA32 region, computed in setup_bootmem(). */
static phys_addr_t dma32_phys_limit __ro_after_init;

/* Hand the zone boundaries (DMA32/NORMAL) to the core mm allocator. */
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

/* Ensure the zero page really is all zeroes before it is ever mapped. */
static void __init setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
/* Print one virtual-memory-layout line, range size expressed in kB. */
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

/* Print one virtual-memory-layout line, range size expressed in MB. */
static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

/* Dump the kernel's virtual address space layout to the log (DEBUG_VM only). */
static void __init print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
#ifdef CONFIG_64BIT
	print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
		  (unsigned long)ADDRESS_SPACE_END);
#endif
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */

/* Release all boot memory to the buddy allocator and report the layout. */
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}

/*
 * Early (memblock-based) memory setup: reserve the kernel image and DTB,
 * clamp the usable range, and establish min/max PFNs and the DMA32 limit.
 */
void __init setup_bootmem(void)
{
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	phys_addr_t dram_end = memblock_end_of_DRAM();
	phys_addr_t max_mapped_addr = __pa(~(ulong)0);

	/* The maximal physical memory size is -PAGE_OFFSET. */
	memblock_enforce_memory_limit(-PAGE_OFFSET);

	/*
	 * Reserve from the start of the kernel to the end of the kernel
	 * and make sure we align the reservation on PMD_SIZE since we will
	 * map the kernel in the linear mapping as read-only: we do not want
	 * any allocation to happen between _end and the next pmd aligned page.
	 */
	memblock_reserve(vmlinux_start, (vmlinux_end - vmlinux_start + PMD_SIZE - 1) & PMD_MASK);

	/*
	 * memblock allocator is not aware of the fact that last 4K bytes of
	 * the addressable memory can not be mapped because of IS_ERR_VALUE
	 * macro. Make sure that last 4k bytes are not usable by memblock
	 * if end of dram is equal to maximum addressable memory.
	 */
	if (max_mapped_addr == (dram_end - 1))
		memblock_set_current_limit(max_mapped_addr - 4096);

	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
	max_low_pfn = max_pfn = PFN_DOWN(dram_end);

	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);

	reserve_initrd_mem();
	/*
	 * If DTB is built in, no need to reserve its memblock.
	 * Otherwise, do reserve it but avoid using
	 * early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	dma_contiguous_reserve(dma32_phys_limit);
	memblock_allow_resize();
}

#ifdef CONFIG_MMU
/* Active page-table allocation callbacks; swapped as boot progresses. */
static struct pt_alloc_ops pt_ops __ro_after_init;

/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset __ro_after_init;
EXPORT_SYMBOL(va_pa_offset);
#ifdef CONFIG_64BIT
/* Offset between kernel mapping virtual address and kernel load address */
unsigned long va_kernel_pa_offset;
EXPORT_SYMBOL(va_kernel_pa_offset);
#endif
/* PFN of the first byte of the kernel image (load address). */
unsigned long pfn_base __ro_after_init;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

/*
 * Install (or clear, when prot is empty) the PTE backing a fixmap slot and
 * flush the local TLB entry for it.
 */
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

/* MMU is off: physical and virtual addresses coincide. */
static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

/* MMU on but linear map not final: window the PTE page through FIX_PTE. */
static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

/* Linear mapping is up: plain __va() translation works. */
static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

/* PTE page allocation once memblock is usable (MMU on, pre-buddy). */
static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

/* PTE page allocation from the buddy allocator, with page-table ctor. */
static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));

	return __pa(vaddr);
}

/* Populate a single PTE for va -> pa; existing entries are left untouched. */
static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}
/* MMU on but linear map not final: window the PMD page through FIX_PMD. */
static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

/* Linear mapping is up: plain __va() translation works. */
static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

/*
 * Before the MMU is on, the only PMD table available is the statically
 * allocated early_pmd, which can cover a single PGD entry worth of kernel.
 */
static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT);

	return (uintptr_t)early_pmd;
}

/* PMD page allocation once memblock is usable (MMU on, pre-buddy). */
static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

/* PMD page allocation from the buddy allocator. */
static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

/*
 * Map va -> pa at the PMD level: either as a PMD_SIZE leaf, or by
 * allocating/finding the next-level PTE table and recursing into it.
 */
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

/*
 * Abstract away whether the level below the PGD is a PMD (sv39) or a PTE
 * (two-level sv32), so create_pgd_mapping() reads the same either way.
 */
#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif

/*
 * Map va -> pa at the PGD level: either as a PGDIR_SIZE leaf, or by
 * allocating/finding the next-level table and delegating to it.
 */
void __init create_pgd_mapping(pgd_t *pgdp,
			       uintptr_t va, phys_addr_t pa,
			       phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

/* Pick the largest mapping granule (PMD vs PAGE) the range's alignment allows. */
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with MMU-off.
 *
 * Following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

/* Physical load address and size of the kernel image; set in setup_vm(). */
uintptr_t load_pa, load_sz;

/* Map the whole kernel image [kernel_virt_addr, +load_sz) into pgdir. */
static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
{
	uintptr_t va, end_va;

	end_va = kernel_virt_addr + load_sz;
	for (va = kernel_virt_addr; va < end_va; va += map_size)
		create_pgd_mapping(pgdir, va,
				   load_pa + (va - kernel_virt_addr),
				   map_size, PAGE_KERNEL_EXEC);
}

/*
 * Build the early page tables (trampoline, early_pg_dir, fixmap, DTB window)
 * while the MMU is still off. head.S enables paging with these tables right
 * after this returns; setup_vm_final() later builds the real swapper tables.
 */
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t pa;
	uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif
	load_pa = (uintptr_t)(&_start);
	load_sz = (uintptr_t)(&_end) - load_pa;

	va_pa_offset = PAGE_OFFSET - load_pa;
#ifdef CONFIG_64BIT
	va_kernel_pa_offset = kernel_virt_addr - load_pa;
#endif

	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	map_size = PMD_SIZE;

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);

	/* MMU is off, so use the identity-mapped "early" allocators. */
	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering entire kernel which will allow
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	create_kernel_page_table(early_pg_dir, map_size);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup early PMD for DTB */
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PMD mappings for FDT early scan */
	pa = dtb_pa & ~(PMD_SIZE - 1);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
			   pa, PMD_SIZE, PAGE_KERNEL);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
			   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
#ifdef CONFIG_64BIT
	/*
	 * __va can't be used since it would return a linear mapping address
	 * whereas dtb_early_va will be used before setup_vm_final installs
	 * the linear mapping.
	 */
	dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
#else
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#else
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PGD mappings for FDT early scan */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
#ifdef CONFIG_64BIT
	dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
#else
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#endif
	dtb_early_pa = dtb_pa;

	/*
	 * Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap
	 * range can not span multiple pmds.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * Early ioremap fixmap is already created as it lies within first 2MB
	 * of fixmap region. We always map PMD_SIZE. Thus, both FIX_BTMAP_END
	 * FIX_BTMAP_BEGIN should lie in the same pmd. Verify that and warn
	 * the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

#ifdef CONFIG_64BIT
/*
 * Make the linear-mapping alias of kernel text/rodata RO and NX; the kernel
 * executes from its own (kernel) mapping, so the alias must not be writable
 * or executable.
 */
void protect_kernel_linear_mapping_text_rodata(void)
{
	unsigned long text_start = (unsigned long)lm_alias(_start);
	unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
	unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
	unsigned long data_start = (unsigned long)lm_alias(_data);

	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
	set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);

	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
}
#endif

/*
 * Build the final swapper page tables (fixmap, full linear mapping of every
 * memory bank and, on 64-bit, the kernel mapping), then switch SATP over to
 * them and install the buddy-based page-table allocators.
 */
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/**
	 * MMU is enabled at this point. But page table setup is not complete yet.
	 * fixmap page table alloc functions should be used at this point
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks in the linear mapping */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size,
#ifdef CONFIG_64BIT
					   PAGE_KERNEL
#else
					   PAGE_KERNEL_EXEC
#endif
					);
		}
	}

#ifdef CONFIG_64BIT
	/* Map the kernel */
	create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
#endif

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* generic page allocation functions must be used to setup page table */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
/* NOMMU: no page tables to build; the DTB is directly addressable. */
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	dtb_early_va = (void *)dtb_pa;
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_STRICT_KERNEL_RWX
/* Apply RO/NX protections to kernel text, init and data regions. */
void __init protect_kernel_text_data(void)
{
	unsigned long text_start = (unsigned long)_start;
	unsigned long init_text_start = (unsigned long)__init_text_begin;
	unsigned long init_data_start = (unsigned long)__init_data_begin;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
	/* rodata section is marked readonly in mark_rodata_ro */
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
}

/* Core-mm hook: make .rodata read-only and run the W^X checker. */
void mark_rodata_ro(void)
{
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;

	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif

/* Finalize the page tables and the shared zero page. */
void __init paging_init(void)
{
	setup_vm_final();
	setup_zero_page();
}
/*
 * Late arch memory init: memtest, NUMA bring-up, sparsemem and zone setup.
 * Called after paging_init(), before the buddy allocator is populated.
 */
void __init misc_mem_init(void)
{
	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
	arch_numa_init();
	sparse_init();
	zone_sizes_init();
	memblock_dump_all();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Populate the vmemmap with base pages (no huge-page optimisation, no altmap). */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif