// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

extern char etext[], _stext[], _sinittext[], _einittext[];

static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

/*
 * Point the PMD entries covering the fixmap region at the statically
 * allocated PTE page above, so the fixmap and early_ioremap() are usable
 * before any memory allocator is up.
 */
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

/* Boot-time page table allocation: only memblock is usable this early. */
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}
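/*
 * Editorial sketch, not part of the original file: a minimal model of the
 * two-level address split that map_kernel_page() relies on, assuming 4K
 * pages and 1024-entry tables (the classic 32-bit layout the comments
 * above describe). The demo_* helpers are invented names, for
 * illustration only.
 */
static inline unsigned int demo_pgd_index(unsigned long va)
{
	return va >> 22;		/* upper 10 bits: first-level index */
}

static inline unsigned int demo_pte_index(unsigned long va)
{
	return (va >> 12) & 0x3ff;	/* middle 10 bits: second-level index */
}
/* e.g. va 0xc0123456 -> demo_pgd_index() = 768, demo_pte_index() = 0x123 */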
/*
 * Map in a chunk of physical memory starting at start.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		/* Kernel text and init text keep executable protection */
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	/* Block-mapped (BAT) addresses have no PTE to rewrite */
	if (v_block_mapped(address))
		return 0;
	kpte = virt_to_kpte(address);
	if (!kpte)
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;
	struct page *start = page;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr_noflush(page, prot);
		if (err)
			break;
	}
	wmb();
	local_irq_restore(flags);
	flush_tlb_kernel_range((unsigned long)page_address(start),
			       (unsigned long)page_address(page));
	return err;
}

void mark_initmem_nx(void)
{
	struct page *page = virt_to_page(_sinittext);
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_sinittext))
		mmu_mark_initmem_nx();
	else
		change_page_attr(page, numpages, PAGE_KERNEL);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	struct page *page;
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	page = virt_to_page(_stext);
	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	page = virt_to_page(__start_rodata);
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	change_page_attr(page, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
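/*
 * Editorial sketch, not part of the original file: the numpages arithmetic
 * used by mark_initmem_nx() and mark_rodata_ro() above. Rounding the end
 * address up and the start address down to page frame numbers guarantees
 * the whole [start, end) range is covered even when it is not page
 * aligned. demo_numpages() is an invented name for illustration only;
 * PFN_UP()/PFN_DOWN() come from <linux/pfn.h>.
 */
static inline unsigned long demo_numpages(unsigned long start, unsigned long end)
{
	/* e.g. start 0xc0001100, end 0xc0003200 -> 0xc0004 - 0xc0001 = 3 pages */
	return PFN_UP(end) - PFN_DOWN(start);
}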