--- mmu.c (cdd38c5f1ce4398ec58fec95904b75824daab7b5)
+++ mmu.c (03aaf83fba6e5af08b5dd174c72edee9b7d9ed9b)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Based on arch/arm/mm/mmu.c
  *
  * Copyright (C) 1995-2005 Russell King
  * Copyright (C) 2012 ARM Ltd.
  */

--- 614 unchanged lines hidden ---

  */
 static bool arm64_early_this_cpu_has_bti(void)
 {
     u64 pfr1;

     if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
         return false;

-    pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
+    pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
     return cpuid_feature_extract_unsigned_field(pfr1,
                                                 ID_AA64PFR1_BT_SHIFT);
 }

 /*
  * Create fine-grained mappings for the kernel.
  */
 static void __init map_kernel(pgd_t *pgdp)
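
The BTI probe switches from read_sysreg_s() to __read_sysreg_by_encoding(), presumably so this early check sees the same view of ID_AA64PFR1_EL1 (including any early ID-register overrides) as the rest of the cpufeature code. The extraction that follows is plain bit arithmetic on a 4-bit unsigned ID-register field; the userspace sketch below shows that logic only, with a stand-in helper name and a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for cpuid_feature_extract_unsigned_field(): ID register
     * fields are 4 bits wide, so shift, then mask. The BT field sits in
     * bits [3:0] of ID_AA64PFR1_EL1, i.e. shift 0.
     */
    static uint64_t extract_unsigned_field(uint64_t reg, unsigned int shift)
    {
        return (reg >> shift) & 0xf;
    }

    int main(void)
    {
        uint64_t pfr1 = 0x1;    /* made-up value: BT = 1, BTI present */

        printf("BTI implemented: %s\n",
               extract_unsigned_field(pfr1, 0) ? "yes" : "no");
        return 0;
    }

Compiled and run, this prints "BTI implemented: yes"; the kernel helper additionally has a signed-field variant for fields that encode negative values.
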

--- 449 unchanged lines hidden ---

 }
 #endif

 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 #if !ARM64_SWAPPER_USES_SECTION_MAPS
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                                struct vmem_altmap *altmap)
 {
+    WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+
     return vmemmap_populate_basepages(start, end, node, altmap);
 }
 #else    /* !ARM64_SWAPPER_USES_SECTION_MAPS */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                                struct vmem_altmap *altmap)
 {
     unsigned long addr = start;
     unsigned long next;
     pgd_t *pgdp;
     p4d_t *p4dp;
     pud_t *pudp;
     pmd_t *pmdp;

+    WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+
     do {
         next = pmd_addr_end(addr, end);

         pgdp = vmemmap_pgd_populate(addr, node);
         if (!pgdp)
             return -ENOMEM;

         p4dp = vmemmap_p4d_populate(pgdp, addr, node);

--- 319 unchanged lines hidden ---

     WARN_ON(pgdir != init_mm.pgd);
     WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

     unmap_hotplug_range(start, end, false, NULL);
     free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
 }
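
Both vmemmap_populate() variants now warn when the requested span strays outside the vmemmap window. The convention is inclusive start, exclusive end, so end == VMEMMAP_END is still in bounds. A self-contained sketch of exactly that boundary test, using made-up window values rather than the real VMEMMAP_START/VMEMMAP_END:

    #include <assert.h>
    #include <stdint.h>

    /* Made-up window bounds; the real VMEMMAP_START/VMEMMAP_END are
     * derived from the arm64 virtual address layout.
     */
    #define VMEMMAP_START    0x1000ull
    #define VMEMMAP_END      0x9000ull

    /* Mirrors the WARN_ON() condition: nonzero when [start, end) is not
     * fully contained in [VMEMMAP_START, VMEMMAP_END).
     */
    static int out_of_vmemmap(uint64_t start, uint64_t end)
    {
        return (start < VMEMMAP_START) || (end > VMEMMAP_END);
    }

    int main(void)
    {
        assert(!out_of_vmemmap(0x1000, 0x9000));    /* whole window: fine */
        assert(out_of_vmemmap(0x0800, 0x2000));     /* starts below it */
        assert(out_of_vmemmap(0x8000, 0x9008));     /* runs past its end */
        return 0;
    }

Note that WARN_ON() only logs; an out-of-range caller still proceeds, so the check documents the contract rather than enforcing it.
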
-static bool inside_linear_region(u64 start, u64 size)
+struct range arch_get_mappable_range(void)
 {
+    struct range mhp_range;
+
     /*
      * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
      * accommodating both its ends but excluding PAGE_END. Max physical
      * range which can be mapped inside this linear mapping range, must
      * also be derived from its end points.
      */
-    return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
-           (start + size - 1) <= __pa(PAGE_END - 1);
+    mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
+    mhp_range.end = __pa(PAGE_END - 1);
+    return mhp_range;
 }
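
The hook publishes the same physical window the old boolean helper hard-coded, but as a struct range whose end is inclusive: a span [start, start + size) fits when its last byte, start + size - 1, does not pass the end of the window. A self-contained sketch of that containment test (window values invented for illustration):

    #include <assert.h>
    #include <stdint.h>

    /* 'end' is inclusive, mirroring struct range in include/linux/range.h. */
    struct range {
        uint64_t start;
        uint64_t end;
    };

    /* Stand-in linear-map window, not real arm64 values. */
    static const struct range lin = { .start = 0x40000000ull,
                                      .end   = 0xbfffffffull };

    /* [start, start + size) is acceptable when it starts no lower than the
     * window and its last byte sits on or before the inclusive end. The
     * size check mirrors mhp_range_allowed()'s start < end test.
     */
    static int range_ok(uint64_t start, uint64_t size)
    {
        return size && start >= lin.start && (start + size - 1) <= lin.end;
    }

    int main(void)
    {
        assert(range_ok(0x40000000ull, 0x80000000ull));  /* exactly the window */
        assert(!range_ok(0x3fffffffull, 0x10));          /* starts one byte low */
        assert(!range_ok(0xbfffff00ull, 0x200));         /* overruns the end */
        return 0;
    }
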

 int arch_add_memory(int nid, u64 start, u64 size,
                     struct mhp_params *params)
 {
     int ret, flags = 0;

-    if (!inside_linear_region(start, size)) {
-        pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
-        return -EINVAL;
-    }
-
+    VM_BUG_ON(!mhp_range_allowed(start, size, true));
     if (rodata_full || debug_pagealloc_enabled())
         flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

     __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
                          size, params->pgprot, __pgd_pgtable_alloc,
                          flags);

     memblock_clear_nomap(start, size);

--- 144 unchanged lines hidden ---
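
Exporting the range lets the generic hotplug core validate a request before arch_add_memory() is ever called, which is why the arch-side check collapses to a VM_BUG_ON() (compiled away unless CONFIG_DEBUG_VM is set). Below is a self-contained userspace model of how the generic side in mm/memory_hotplug.c consumes the hook; it is a paraphrase, so treat names, clamping details, and messages as approximate:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    struct range {
        u64 start;
        u64 end;    /* inclusive */
    };

    /* Stand-ins for the kernel environment: a 40-bit physical address
     * space and an arch window with invented bounds.
     */
    #define MAX_PHYSMEM_BITS    40

    static struct range arch_get_mappable_range(void)
    {
        return (struct range){ .start = 0x80000000ull,
                               .end   = 0xffffffffffull };
    }

    /* Modelled on mhp_get_pluggable_range(): clamp the arch window to
     * what the sparse memory model can address.
     */
    static struct range mhp_get_pluggable_range(bool need_mapping)
    {
        const u64 max_phys = (1ull << MAX_PHYSMEM_BITS) - 1;
        struct range mhp_range;

        if (need_mapping) {
            mhp_range = arch_get_mappable_range();
            if (mhp_range.end > max_phys)
                mhp_range.end = max_phys;
        } else {
            mhp_range.start = 0;
            mhp_range.end = max_phys;
        }
        return mhp_range;
    }

    /* Modelled on mhp_range_allowed(): the core rejects a bad range
     * before arch_add_memory() ever runs, so the arch assertion should
     * never fire.
     */
    static bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
    {
        struct range r = mhp_get_pluggable_range(need_mapping);
        u64 end = start + size;

        if (start < end && start >= r.start && (end - 1) <= r.end)
            return true;

        fprintf(stderr, "range [%#llx-%#llx] outside [%#llx-%#llx]\n",
                (unsigned long long)start, (unsigned long long)(end - 1),
                (unsigned long long)r.start, (unsigned long long)r.end);
        return false;
    }

    int main(void)
    {
        printf("in-window add: %d\n",
               mhp_range_allowed(0x100000000ull, 0x40000000ull, true));
        printf("below window:  %d\n",
               mhp_range_allowed(0x40000000ull, 0x40000000ull, true));
        return 0;
    }

The design point is that the one authoritative check lives in core code, and each architecture only describes its addressable window instead of open-coding its own validation and error path.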