arch/arm64/mm/mmu.c: changes from 5bd466947160d1e0517747b63216806ea768c791 (old) to d27cfa1fc823d35a6cf45ba51f5623db8a14a9b9 (new), "-" removed, "+" added:
 /*
  * Based on arch/arm/mm/mmu.c
  *
  * Copyright (C) 1995-2005 Russell King
  * Copyright (C) 2012 ARM Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
[... 30 unchanged lines hidden ...]
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
 #include <asm/memblock.h>
 #include <asm/mmu_context.h>
 #include <asm/ptdump.h>

 #define NO_BLOCK_MAPPINGS	BIT(0)
+#define NO_CONT_MAPPINGS	BIT(1)
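Note: PTE_CONT is the AArch64 "contiguous" hint: a naturally aligned run of adjacent table entries mapping physically contiguous memory may be flagged so the TLB can cache the whole run as a single entry. NO_CONT_MAPPINGS gives callers a veto over that hint, just as NO_BLOCK_MAPPINGS vetoes PMD/PUD block entries. A minimal stand-alone sketch of how the two flag bits compose (plain userspace C with the constants restated locally; not kernel code):

```c
#include <stdio.h>

#define BIT(n)            (1UL << (n))
#define NO_BLOCK_MAPPINGS BIT(0)	/* veto PMD/PUD block entries */
#define NO_CONT_MAPPINGS  BIT(1)	/* veto the PTE_CONT hint     */

int main(void)
{
	/* a caller that wants strictly page-granular mappings sets both */
	unsigned long flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	printf("block mappings allowed:  %s\n",
	       flags & NO_BLOCK_MAPPINGS ? "no" : "yes");
	printf("contiguous hint allowed: %s\n",
	       flags & NO_CONT_MAPPINGS ? "no" : "yes");
	return 0;
}
```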

 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);

 /*
  * Empty_zero_page is a special page that is used for zero-initialized data
[... 56 unchanged lines hidden ...]

 	/* live contiguous mappings may not be manipulated at all */
 	if ((old | new) & PTE_CONT)
 		return false;

 	return ((old ^ new) & ~mask) == 0;
 }
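Note: the tail of pgattr_change_is_safe() shown above is why the contiguous hint must be withheld from any region that will be remapped later: a live entry may only change within a small permission mask, and an entry carrying PTE_CONT may not be changed at all, since the whole contiguous run would need break-before-make. A runnable sketch of the same logic; PTE_CONT sits at its real arm64 position, but the permission mask here is an illustrative stand-in for the real set:

```c
#include <stdbool.h>
#include <stdio.h>

#define PTE_CONT   (1ULL << 52)	/* contiguous hint (real arm64 position) */
#define PTE_RDONLY (1ULL << 7)	/* illustrative permission bits...       */
#define PTE_PXN    (1ULL << 53)	/* ...standing in for the real mask      */

static bool change_is_safe(unsigned long long old, unsigned long long new)
{
	unsigned long long mask = PTE_RDONLY | PTE_PXN;

	/* creating or tearing down an entry is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* otherwise, only bits inside the permission mask may differ */
	return ((old ^ new) & ~mask) == 0;
}

int main(void)
{
	unsigned long long rw = 0x1, ro = 0x1 | PTE_RDONLY;

	printf("%d\n", change_is_safe(rw, ro));			      /* 1 */
	printf("%d\n", change_is_safe(rw | PTE_CONT, ro | PTE_CONT)); /* 0 */
	return 0;
}
```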
-static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
-			   unsigned long end, phys_addr_t phys,
-			   pgprot_t prot,
-			   phys_addr_t (*pgtable_alloc)(void))
+static void init_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
+		     phys_addr_t phys, pgprot_t prot)
 {
 	pte_t *pte;

-	BUG_ON(pmd_sect(*pmd));
-	if (pmd_none(*pmd)) {
-		phys_addr_t pte_phys;
-		BUG_ON(!pgtable_alloc);
-		pte_phys = pgtable_alloc();
-		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
-	}
-	BUG_ON(pmd_bad(*pmd));
-
 	pte = pte_set_fixmap_offset(pmd, addr);
 	do {
 		pte_t old_pte = *pte;

 		set_pte(pte, pfn_pte(__phys_to_pfn(phys), prot));

 		/*
 		 * After the PTE entry has been populated once, we
 		 * only allow updates to the permission attributes.
 		 */
 		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));

 		phys += PAGE_SIZE;
 	} while (pte++, addr += PAGE_SIZE, addr != end);

 	pte_clear_fixmap();
 }

-static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
-			   phys_addr_t phys, pgprot_t prot,
-			   phys_addr_t (*pgtable_alloc)(void),
-			   int flags)
+static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
+				unsigned long end, phys_addr_t phys,
+				pgprot_t prot,
+				phys_addr_t (*pgtable_alloc)(void),
+				int flags)
 {
-	pmd_t *pmd;
 	unsigned long next;

-	/*
-	 * Check for initial section mappings in the pgd/pud and remove them.
-	 */
-	BUG_ON(pud_sect(*pud));
-	if (pud_none(*pud)) {
-		phys_addr_t pmd_phys;
+	BUG_ON(pmd_sect(*pmd));
+	if (pmd_none(*pmd)) {
+		phys_addr_t pte_phys;
 		BUG_ON(!pgtable_alloc);
-		pmd_phys = pgtable_alloc();
-		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
+		pte_phys = pgtable_alloc();
+		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
 	}
-	BUG_ON(pud_bad(*pud));
+	BUG_ON(pmd_bad(*pmd));

+	do {
+		pgprot_t __prot = prot;
+
+		next = pte_cont_addr_end(addr, end);
+
+		/* use a contiguous mapping if the range is suitably aligned */
+		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
+		    (flags & NO_CONT_MAPPINGS) == 0)
+			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+		init_pte(pmd, addr, next, phys, __prot);
+
+		phys += next - addr;
+	} while (addr = next, addr != end);
+}
+
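Note: pte_cont_addr_end() advances at most to the next CONT_PTE_SIZE boundary, so each chunk reaching the test above is already bounded; the test then requires the chunk's virtual start, virtual end and physical address to all sit on CONT_PTE_SIZE boundaries. A runnable sketch of that eligibility test, assuming a 4K granule (16 PTEs per hint, a 64K span; constants restated locally):

```c
#include <stdio.h>

#define PAGE_SHIFT	12			  /* 4K granule assumed         */
#define CONT_PTES	16			  /* PTEs sharing one TLB entry */
#define CONT_PTE_SIZE	(CONT_PTES << PAGE_SHIFT) /* 64K                        */
#define CONT_PTE_MASK	(~(CONT_PTE_SIZE - 1UL))
#define NO_CONT_MAPPINGS (1UL << 1)

static int use_cont_pte(unsigned long addr, unsigned long next,
			unsigned long phys, unsigned long flags)
{
	/* same shape as the test in alloc_init_cont_pte() above */
	return ((addr | next | phys) & ~CONT_PTE_MASK) == 0 &&
	       (flags & NO_CONT_MAPPINGS) == 0;
}

int main(void)
{
	/* VA start, VA end and PA all 64K-aligned: eligible */
	printf("%d\n", use_cont_pte(0x200000, 0x210000, 0x40200000, 0));
	/* VA start only 4K-aligned: not eligible */
	printf("%d\n", use_cont_pte(0x201000, 0x210000, 0x40200000, 0));
	/* caller passed NO_CONT_MAPPINGS: not eligible */
	printf("%d\n", use_cont_pte(0x200000, 0x210000, 0x40200000,
				    NO_CONT_MAPPINGS));
	return 0;
}
```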
+static void init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+		     phys_addr_t phys, pgprot_t prot,
+		     phys_addr_t (*pgtable_alloc)(void), int flags)
+{
+	unsigned long next;
+	pmd_t *pmd;
+
 	pmd = pmd_set_fixmap_offset(pud, addr);
 	do {
 		pmd_t old_pmd = *pmd;

 		next = pmd_addr_end(addr, end);

 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
 		    (flags & NO_BLOCK_MAPPINGS) == 0) {
 			pmd_set_huge(pmd, phys, prot);

 			/*
 			 * After the PMD entry has been populated once, we
 			 * only allow updates to the permission attributes.
 			 */
 			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
 						      pmd_val(*pmd)));
 		} else {
-			alloc_init_pte(pmd, addr, next, phys,
-				       prot, pgtable_alloc);
+			alloc_init_cont_pte(pmd, addr, next, phys, prot,
+					    pgtable_alloc, flags);

 			BUG_ON(pmd_val(old_pmd) != 0 &&
 			       pmd_val(old_pmd) != pmd_val(*pmd));
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);

 	pmd_clear_fixmap();
 }

+static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
+				unsigned long end, phys_addr_t phys,
+				pgprot_t prot,
+				phys_addr_t (*pgtable_alloc)(void), int flags)
+{
+	unsigned long next;
+
+	/*
+	 * Check for initial section mappings in the pgd/pud.
+	 */
+	BUG_ON(pud_sect(*pud));
+	if (pud_none(*pud)) {
+		phys_addr_t pmd_phys;
+		BUG_ON(!pgtable_alloc);
+		pmd_phys = pgtable_alloc();
+		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
+	}
+	BUG_ON(pud_bad(*pud));
+
+	do {
+		pgprot_t __prot = prot;
+
+		next = pmd_cont_addr_end(addr, end);
+
+		/* use a contiguous mapping if the range is suitably aligned */
+		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
+		    (flags & NO_CONT_MAPPINGS) == 0)
+			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+		init_pmd(pud, addr, next, phys, __prot, pgtable_alloc, flags);
+
+		phys += next - addr;
+	} while (addr = next, addr != end);
+}
+
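Note: alloc_init_cont_pmd() is the same pattern one level up: pmd_cont_addr_end() chunks by CONT_PMD_SIZE, and aligned chunks get PTE_CONT on their section entries. A quick back-of-envelope for the spans one hint covers, assuming a 4K granule where 16 entries share a hint at each level:

```c
#include <stdio.h>

int main(void)
{
	unsigned long page    = 4096;		/* 4K granule assumed */
	unsigned long section = 512 * page;	/* one PMD block: 2M  */

	/* 16 entries share one contiguous hint at each level */
	printf("CONT_PTE span: %lu KiB\n", 16 * page / 1024);	 /* 64 KiB */
	printf("CONT_PMD span: %lu MiB\n", 16 * section >> 20);	 /* 32 MiB */
	return 0;
}
```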
 static inline bool use_1G_block(unsigned long addr, unsigned long next,
 				unsigned long phys)
 {
 	if (PAGE_SHIFT != 12)
 		return false;

 	if (((addr | next | phys) & ~PUD_MASK) != 0)
 		return false;
[... 32 unchanged lines hidden ...]

 			/*
 			 * After the PUD entry has been populated once, we
 			 * only allow updates to the permission attributes.
 			 */
 			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
 						      pud_val(*pud)));
 		} else {
-			alloc_init_pmd(pud, addr, next, phys, prot,
-				       pgtable_alloc, flags);
+			alloc_init_cont_pmd(pud, addr, next, phys, prot,
+					    pgtable_alloc, flags);

 			BUG_ON(pud_val(old_pud) != 0 &&
 			       pud_val(old_pud) != pud_val(*pud));
 		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);

 	pud_clear_fixmap();
[... 47 unchanged lines hidden ...]
 static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
 					  phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
 		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
 			&phys, virt);
 		return;
 	}
-	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, 0);
+	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
+			     NO_CONT_MAPPINGS);
 }

 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
 			       pgprot_t prot, bool page_mappings_only)
 {
 	int flags = 0;

 	BUG_ON(mm == &init_mm);

 	if (page_mappings_only)
-		flags = NO_BLOCK_MAPPINGS;
+		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

 	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
 			     pgd_pgtable_alloc, flags);
 }

 static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
 				phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
 		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
 			&phys, virt);
 		return;
 	}

-	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, 0);
+	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
+			     NO_CONT_MAPPINGS);

 	/* flush the TLBs after updating live kernel mappings */
 	flush_tlb_kernel_range(virt, virt + size);
 }

 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
 	phys_addr_t kernel_start = __pa_symbol(_text);
 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
 	int flags = 0;

 	if (debug_pagealloc_enabled())
-		flags = NO_BLOCK_MAPPINGS;
+		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

 	/*
 	 * Take care not to create a writable alias for the
 	 * read-only text and rodata sections of the kernel image.
 	 */

 	/* No overlap with the kernel text/rodata */
 	if (end < kernel_start || start >= kernel_end) {
[... 20 unchanged lines hidden ...]

 	/*
 	 * Map the linear alias of the [_text, __init_begin) interval
 	 * as non-executable now, and remove the write permission in
 	 * mark_linear_text_alias_ro() below (which will be called after
 	 * alternative patching has completed). This makes the contents
 	 * of the region accessible to subsystems such as hibernate,
 	 * but protects it from inadvertent modification or execution.
+	 * Note that contiguous mappings cannot be remapped in this way,
+	 * so we should avoid them here.
 	 */
 	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
 			     kernel_end - kernel_start, PAGE_KERNEL,
-			     early_pgtable_alloc, 0);
+			     early_pgtable_alloc, NO_CONT_MAPPINGS);
 }

 void __init mark_linear_text_alias_ro(void)
 {
 	/*
 	 * Remove the write permissions from the linear alias of .text/.rodata
 	 */
 	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
[... 30 unchanged lines hidden ...]
 	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
 	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
 			    section_size, PAGE_KERNEL_RO);

 	debug_checkwx();
 }

 static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
-				      pgprot_t prot, struct vm_struct *vma)
+				      pgprot_t prot, struct vm_struct *vma,
+				      int flags)
 {
 	phys_addr_t pa_start = __pa_symbol(va_start);
 	unsigned long size = va_end - va_start;

 	BUG_ON(!PAGE_ALIGNED(pa_start));
 	BUG_ON(!PAGE_ALIGNED(size));

 	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
-			     early_pgtable_alloc, 0);
+			     early_pgtable_alloc, flags);

 	vma->addr = va_start;
 	vma->phys_addr = pa_start;
 	vma->size = size;
 	vma->flags = VM_MAP;
 	vma->caller = __builtin_return_address(0);

 	vm_area_add_early(vma);
[... 15 unchanged lines hidden ...]

 	/*
 	 * External debuggers may need to write directly to the text
 	 * mapping to install SW breakpoints. Allow this (only) when
 	 * explicitly requested with rodata=off.
 	 */
 	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

-	map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text);
+	/*
+	 * Only rodata will be remapped with different permissions later on,
+	 * all other segments are allowed to use contiguous mappings.
+	 */
+	map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0);
 	map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
-			   &vmlinux_rodata);
+			   &vmlinux_rodata, NO_CONT_MAPPINGS);
 	map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
-			   &vmlinux_inittext);
+			   &vmlinux_inittext, 0);
 	map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
-			   &vmlinux_initdata);
-	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
+			   &vmlinux_initdata, 0);
+	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0);

 	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
 		/*
 		 * The fixmap falls in a separate pgd to the kernel, and doesn't
 		 * live in the carveout for the swapper_pg_dir. We can simply
 		 * re-use the existing dir for the fixmap.
 		 */
 		set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
[... 341 unchanged lines hidden ...]