--- init.c (b7c15a3ce6fea5da3aa836c897a78ac628467d54)
+++ init.c (a35707c3d850dda0ceefb75b1b3bd191921d5765)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2012 Regents of the University of California
  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
  * Copyright (C) 2020 FORTH-ICS/CARV
  * Nick Kossifidis <mick@ics.forth.gr>
  */
 
--- 194 unchanged lines hidden ---
 
 	reserve_initrd_mem();
 	/*
 	 * If DTB is built in, no need to reserve its memblock.
 	 * Otherwise, do reserve it but avoid using
 	 * early_init_fdt_reserve_self() since __pa() does
 	 * not work for DTB pointers that are fixmap addresses
 	 */
-	if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) {
-		/*
-		 * In case the DTB is not located in a memory region we won't
-		 * be able to locate it later on via the linear mapping and
-		 * get a segfault when accessing it via __va(dtb_early_pa).
-		 * To avoid this situation copy DTB to a memory region.
-		 * Note that memblock_phys_alloc will also reserve DTB region.
-		 */
-		if (!memblock_is_memory(dtb_early_pa)) {
-			size_t fdt_size = fdt_totalsize(dtb_early_va);
-			phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE);
-			void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size);
+	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
+		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
 
-			memcpy(new_dtb_early_va, dtb_early_va, fdt_size);
-			early_memunmap(new_dtb_early_va, fdt_size);
-			_dtb_early_pa = new_dtb_early_pa;
-		} else
-			memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
-	}
-
 	early_init_fdt_scan_reserved_mem();
 	dma_contiguous_reserve(dma32_phys_limit);
 	if (IS_ENABLED(CONFIG_64BIT))
 		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 	memblock_allow_resize();
 }
 
 #ifdef CONFIG_MMU
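Both the kept comment and the removed block in this hunk turn on the same constraint: __pa()/__va() style conversions are only valid for addresses inside the kernel's linear mapping of RAM, while the early DTB pointer is a fixmap virtual address (and dtb_early_pa may even lie outside any memblock memory region, which is what the removed branch handled by copying the blob). The sketch below is only an illustration of that linear-mapping arithmetic under made-up constants; ex_va()/ex_pa() and every address in it are invented for the example and are not the kernel's macros or memory layout.

```c
/*
 * Illustration only: a linear map converts PA <-> VA with one constant
 * offset, so the conversion is meaningful only for addresses that really
 * lie in the mapped RAM range.  All constants here are invented; they are
 * not the kernel's PAGE_OFFSET or va_pa_offset.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_OFFSET 0xffffffd800000000ULL	/* made-up linear map base */
#define EX_RAM_BASE    0x0000000080000000ULL	/* made-up start of RAM */

static uint64_t ex_va(uint64_t pa) { return pa - EX_RAM_BASE + EX_PAGE_OFFSET; }
static uint64_t ex_pa(uint64_t va) { return va - EX_PAGE_OFFSET + EX_RAM_BASE; }

int main(void)
{
	uint64_t dtb_pa    = 0x0000000082200000ULL;	/* a DTB that sits in RAM */
	uint64_t fixmap_va = 0xffffffc6fee00000ULL;	/* made-up fixmap slot VA */

	/* For a RAM address the round trip holds ... */
	printf("linear VA: 0x%llx, round trip ok: %d\n",
	       (unsigned long long)ex_va(dtb_pa),
	       ex_pa(ex_va(dtb_pa)) == dtb_pa);

	/*
	 * ... but feeding a fixmap VA through the same formula is meaningless,
	 * which is the reason the comment above gives for avoiding
	 * early_init_fdt_reserve_self() (it converts the DTB virtual address
	 * with __pa()).
	 */
	printf("bogus PA from fixmap VA: 0x%llx\n",
	       (unsigned long long)ex_pa(fixmap_va));
	return 0;
}
```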
--- 491 unchanged lines hidden ---
 	csr_write(CSR_SATP, identity_satp);
 	hw_satp = csr_swap(CSR_SATP, 0ULL);
 	local_flush_tlb_all();
 
 	if (hw_satp != identity_satp) {
 		if (!check_l4) {
 			disable_pgtable_l5();
 			check_l4 = true;
-			memset(early_pg_dir, 0, PAGE_SIZE);
 			goto retry;
 		}
 		disable_pgtable_l4();
 	}
 
 	memset(early_pg_dir, 0, PAGE_SIZE);
 	memset(early_p4d, 0, PAGE_SIZE);
 	memset(early_pud, 0, PAGE_SIZE);
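This hunk is the paging-mode probe: a candidate satp value is written and immediately read back via csr_swap(), and since a write of an unsupported MODE leaves satp unchanged (the field is WARL in the privileged spec), a mismatch means the requested mode is not implemented; the retry path then gives up the fifth page-table level first (disable_pgtable_l5()) and the fourth after that (disable_pgtable_l4()). The sketch below only shows how such a satp value is laid out for RV64 per the privileged spec, with MODE in bits 63:60 and the root-table PPN in bits 43:0; make_satp(), the EX_* names and the example address are invented for the illustration, not taken from the kernel.

```c
#include <stdint.h>
#include <stdio.h>

/* satp.MODE encodings from the RV64 privileged spec, listed for reference. */
#define EX_SATP_MODE_SHIFT 60
#define EX_SATP_MODE_SV39  8ULL
#define EX_SATP_MODE_SV48  9ULL
#define EX_SATP_MODE_SV57  10ULL
#define EX_PAGE_SHIFT      12

/* Compose a satp value: root page-table PPN in bits 43:0, MODE in bits 63:60. */
static uint64_t make_satp(uint64_t root_table_pa, uint64_t mode)
{
	return (root_table_pa >> EX_PAGE_SHIFT) | (mode << EX_SATP_MODE_SHIFT);
}

int main(void)
{
	uint64_t early_pg_dir_pa = 0x80a00000ULL;	/* invented address */
	uint64_t candidate = make_satp(early_pg_dir_pa, EX_SATP_MODE_SV57);

	/*
	 * The kernel writes such a candidate to satp and reads it back
	 * (csr_swap in the hunk above).  A core without Sv57 support ignores
	 * the write, the read-back differs, and the code retries with Sv48
	 * before finally falling back to Sv39.
	 */
	printf("candidate satp = 0x%016llx (mode %llu, ppn 0x%llx)\n",
	       (unsigned long long)candidate,
	       (unsigned long long)(candidate >> EX_SATP_MODE_SHIFT),
	       (unsigned long long)(candidate & ((1ULL << 44) - 1)));
	return 0;
}
```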
--- 201 unchanged lines hidden ---
 #ifdef CONFIG_64BIT
 	/*
 	 * The last 4K bytes of the addressable memory can not be mapped because
 	 * of IS_ERR_VALUE macro.
 	 */
 	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
 #endif
 
+	apply_early_boot_alternatives();
 	pt_ops_set_early();
 
 	/* Setup early PGD for fixmap */
 	create_pgd_mapping(early_pg_dir, FIXADDR_START,
 			   fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
 
 #ifndef __PAGETABLE_PMD_FOLDED
 	/* Setup fixmap P4D and PUD */
--- 227 unchanged lines hidden ---
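The BUG_ON in the final hunk enforces exactly what its comment says: generic kernel code encodes errno values as pointers in the topmost bytes of the address space, so the kernel mapping has to stop short of that range or valid addresses would be mistaken for error pointers. The standalone sketch below mirrors the convention from include/linux/err.h (MAX_ERRNO is 4095); the unlikely() annotation is dropped, and EX_ENOMEM as well as the test addresses are arbitrary stand-ins.

```c
/*
 * Standalone mirror of the err.h convention: error codes -1..-4095 are
 * passed around as pointer values in the very top of the address space,
 * so IS_ERR_VALUE() must treat that whole range as "not a real address".
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO       4095UL
#define IS_ERR_VALUE(x) ((uintptr_t)(x) >= (uintptr_t)-MAX_ERRNO)
#define EX_ENOMEM       12

int main(void)
{
	uintptr_t err_ptr  = (uintptr_t)-EX_ENOMEM;	/* what ERR_PTR(-ENOMEM) encodes */
	uintptr_t top_4k   = (uintptr_t)-2048;		/* an address in the last 4K */
	uintptr_t below_4k = (uintptr_t)-(64 * 1024);	/* safely below that range */

	printf("ERR_PTR(-ENOMEM) value looks like an error: %d\n", IS_ERR_VALUE(err_ptr));
	printf("mapping in the last 4K looks like an error: %d\n", IS_ERR_VALUE(top_4k));
	printf("mapping below the last 4K is fine:          %d\n", IS_ERR_VALUE(below_4k));
	return 0;
}
```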