--- arch/arm/mm/mmu.c (5461bd81bf7f86503f393b0b1d57e69b1f1bd44e)
+++ arch/arm/mm/mmu.c (a5f4c561b3b19a9bc43a81da6382b0098ebbc1fb)
/*
 * linux/arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.

--- 343 unchanged lines hidden ---

};

const struct mem_type *get_mem_type(unsigned int type)
{
        return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+        __aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+        return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+        return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+        pgd_t *pgd = pgd_offset_k(addr);
+        pud_t *pud = pud_offset(pgd, addr);
+        pmd_t *pmd = pmd_offset(pud, addr);
+
+        return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+        pmd_t *pmd;
+
+        /*
+         * The early fixmap range spans multiple pmds, for which
+         * we are not prepared:
+         */
+        BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+                     != FIXADDR_TOP >> PMD_SHIFT);
+
+        pmd = fixmap_pmd(FIXADDR_TOP);
+        pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+        pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
/*
 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
 * As a result, this can only be called with preemption disabled, as under
 * stop_machine().
 */
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
        unsigned long vaddr = __fix_to_virt(idx);
-        pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+        pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);

        /* Make sure fixmap region does not exceed available allocation. */
        BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
                     FIXADDR_END);
        BUG_ON(idx >= __end_of_fixed_addresses);

        if (pgprot_val(prot))
                set_pte_at(NULL, vaddr, pte,

--- 473 unchanged lines hidden ---

        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
                        (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
                return;
        }

        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-            md->virtual >= PAGE_OFFSET &&
+            md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
            (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
                pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
                        (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
        }

        type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE

--- 200 unchanged lines hidden ---

phys_addr_t arm_lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
{
        phys_addr_t memblock_limit = 0;
        int highmem = 0;
        phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
        struct memblock_region *reg;
-        bool should_use_highmem = false;

        for_each_memblock(memory, reg) {
                phys_addr_t block_start = reg->base;
                phys_addr_t block_end = reg->base + reg->size;
                phys_addr_t size_limit = reg->size;

                if (reg->base >= vmalloc_limit)
                        highmem = 1;
                else
                        size_limit = vmalloc_limit - reg->base;


                if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {

                        if (highmem) {
                                pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
                                          &block_start, &block_end);
                                memblock_remove(reg->base, reg->size);
-                                should_use_highmem = true;
                                continue;
                        }

                        if (reg->size > size_limit) {
                                phys_addr_t overlap_size = reg->size - size_limit;

                                pr_notice("Truncating RAM at %pa-%pa to -%pa",
                                          &block_start, &block_end, &vmalloc_limit);
                                memblock_remove(vmalloc_limit, overlap_size);
                                block_end = vmalloc_limit;
-                                should_use_highmem = true;
                        }
                }

                if (!highmem) {
                        if (block_end > arm_lowmem_limit) {
                                if (reg->size > size_limit)
                                        arm_lowmem_limit = vmalloc_limit;
                                else

--- 18 unchanged lines hidden ---

                                        memblock_limit = block_start;
                                else if (!IS_ALIGNED(block_end, PMD_SIZE))
                                        memblock_limit = arm_lowmem_limit;
                        }

                }
        }

-        if (should_use_highmem)
-                pr_notice("Consider using a HIGHMEM enabled kernel.\n");
-
        high_memory = __va(arm_lowmem_limit - 1) + 1;

        /*
         * Round the memblock limit down to a pmd size. This
         * helps to ensure that we will allocate memory from the
         * last full pmd, which should be mapped.
         */
        if (memblock_limit)

--- 63 unchanged lines hidden ---

         * precious DMA-able memory...
         */
        memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings. Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function. This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except early fixmap, we might remove debug
+ * device mappings. This means earlycon can be used to debug this function
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
 */
static void __init devicemaps_init(const struct machine_desc *mdesc)
{
        struct map_desc map;
        unsigned long addr;
        void *vectors;

        /*
         * Allocate the vector page early.
         */
        vectors = early_alloc(PAGE_SIZE * 2);

        early_trap_init(vectors);

-        for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+        /*
+         * Clear page table except top pmd used by early fixmaps
+         */
+        for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
                pmd_clear(pmd_off_k(addr));

        /*
         * Map the kernel if it is XIP.
         * It is always first in the modulearea.
         */
#ifdef CONFIG_XIP_KERNEL
        map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);

--- 235 unchanged lines hidden ---

        pr_crit("Physical address space modification is only to support Keystone2.\n");
        pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
        pr_crit("feature. Your kernel may crash now, have a good day.\n");
        add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}

#endif

+static void __init early_fixmap_shutdown(void)
+{
+        int i;
+        unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+        pte_offset_fixmap = pte_offset_late_fixmap;
+        pmd_clear(fixmap_pmd(va));
+        local_flush_tlb_kernel_page(va);
+
+        for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+                pte_t *pte;
+                struct map_desc map;
+
+                map.virtual = fix_to_virt(i);
+                pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+                /* Only i/o device mappings are supported ATM */
+                if (pte_none(*pte) ||
+                    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+                        continue;
+
+                map.pfn = pte_pfn(*pte);
+                map.type = MT_DEVICE;
+                map.length = PAGE_SIZE;
+
+                create_mapping(&map);
+        }
+}
+
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
        void *zero_page;

        build_mem_type_table();
        prepare_page_table();
        map_lowmem();
-        memblock_set_current_limit(arm_lowmem_limit);
        dma_contiguous_remap();
+        early_fixmap_shutdown();
        devicemaps_init(mdesc);
        kmap_init();
        tcm_init();

        top_pmd = pmd_off_k(0xffff0000);

        /* allocate the zero page. */
        zero_page = early_alloc(PAGE_SIZE);

        bootmem_init();

        empty_zero_page = virt_to_page(zero_page);
        __flush_dcache_page(NULL, empty_zero_page);
}