// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/bug.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/initrd.h>

#include <asm/setup.h>
#include <asm/cachectl.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>

/*
 * Number of statically-allocated PTE slots backing the kernel half of the
 * page directory (every PGD entry above USER_PTRS_PER_PGD gets a full PTE
 * table out of kernel_pte_tables[] below).
 */
#define PTRS_KERN_TABLE \
		((PTRS_PER_PGD - USER_PTRS_PER_PGD) * PTRS_PER_PTE)

/* The kernel's master page directory, installed by mmu_init(). */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
/*
 * A PTE table containing no valid mappings; user-space PGD slots that have
 * no page table yet are pointed here (see pgd_init()/mmu_init()).
 */
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
/* Static PTE tables that back every kernel-space PGD slot. */
pte_t kernel_pte_tables[PTRS_KERN_TABLE] __page_aligned_bss;

EXPORT_SYMBOL(invalid_pte_table);
/* One page of zeros, exported for use as the shared zero page. */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
						__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

#ifdef CONFIG_BLK_DEV_INITRD
/*
 * setup_initrd - validate and reserve the initrd region handed over by the
 * bootloader (via the global initrd_start/initrd_end).
 *
 * On any problem (empty region, extends past lowmem, overlaps an already
 * reserved memblock region) the initrd is disabled by zeroing
 * initrd_start/initrd_end so later generic code ignores it.
 */
static void __init setup_initrd(void)
{
	unsigned long size;

	if (initrd_start >= initrd_end) {
		pr_err("initrd not found or empty");
		goto disable;
	}

	/* The whole image must sit below the end of lowmem. */
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		pr_err("initrd extends beyond end of memory");
		goto disable;
	}

	size = initrd_end - initrd_start;

	if (memblock_is_region_reserved(__pa(initrd_start), size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
		       __pa(initrd_start), size);
		goto disable;
	}

	/* Keep the buddy allocator from handing these pages out. */
	memblock_reserve(__pa(initrd_start), size);

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);

	initrd_below_start_ok = 1;

	return;

disable:
	initrd_start = initrd_end = 0;

	pr_err(" - disabling initrd\n");
}
#endif

/*
 * mem_init - arch hook called from mm core during boot: publish the memory
 * layout (max_mapnr, high_memory), reserve the initrd, release all memblock
 * memory to the buddy allocator, and free unreserved highmem pages.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET);
#else
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

#ifdef CONFIG_BLK_DEV_INITRD
	/* Must run before memblock_free_all() so the reservation sticks. */
	setup_initrd();
#endif

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		/* FIXME not sure about */
		if (!memblock_is_reserved(tmp << PAGE_SHIFT))
			free_highmem_page(page);
	}
#endif
	mem_init_print_info(NULL);
}

/* Return the init sections to the page allocator (-1 = no poison pattern). */
void free_initmem(void)
{
	free_initmem_default(-1);
}

/*
 * pgd_init - fill a freshly allocated page directory so every entry points
 * at invalid_pte_table (i.e. no valid mappings yet).
 * @p: the PGD page, viewed as an array of PTRS_PER_PGD entries.
 *
 * NOTE(review): flushing the TLB and invalidating the local icache for a
 * brand-new, not-yet-installed PGD looks conservative — presumably required
 * by this core's caching of page-table walks; confirm against the C-SKY
 * MMU manual.
 */
void pgd_init(unsigned long *p)
{
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++)
		p[i] = __pa(invalid_pte_table);

	flush_tlb_all();
	local_icache_inv_all(NULL);
}

/*
 * mmu_init - build the kernel's master page tables and switch to them.
 * @min_pfn: first page frame of memory to map linearly.
 * @max_pfn: one past the last page frame to map.
 *
 * User-space PGD slots point at invalid_pte_table; kernel-space slots each
 * get one static PTE table from kernel_pte_tables[]. Those kernel PTEs are
 * first cleared to _PAGE_GLOBAL, then [min_pfn, max_pfn) is mapped 1:1 with
 * PAGE_KERNEL permissions. Finally the TLB/icache are flushed, the MMU page
 * size is set to 4k, and swapper_pg_dir is installed.
 */
void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn)
{
	int i;

	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		swapper_pg_dir[i].pgd = __pa(invalid_pte_table);

	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i].pgd =
			__pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD)));

	for (i = 0; i < PTRS_KERN_TABLE; i++)
		set_pte(&kernel_pte_tables[i], __pte(_PAGE_GLOBAL));

	/*
	 * Linear map: PTE index is the pfn offset from the start of RAM —
	 * presumably PFN_DOWN(va_pa_offset) is the pfn of physical address 0
	 * of the kernel's linear range; verify against this arch's
	 * va_pa_offset setup.
	 */
	for (i = min_pfn; i < max_pfn; i++)
		set_pte(&kernel_pte_tables[i - PFN_DOWN(va_pa_offset)], pfn_pte(i, PAGE_KERNEL));

	flush_tlb_all();
	local_icache_inv_all(NULL);

	/* Setup page mask to 4k */
	write_mmu_pagemask(0);

	setup_pgd(swapper_pg_dir, 0);
}

/*
 * fixrange_init - make sure PTE tables exist for [start, end) under
 * @pgd_base, allocating zeroed pages from low memblock memory as needed.
 *
 * With the folded page-table levels on this arch, pud/pmd are just casts of
 * the pgd/pud pointers, so one iteration of the inner loop covers PMD_SIZE
 * of address space.
 */
void __init fixrange_init(unsigned long start, unsigned long end,
			  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd(__pa(pte)));
					/* Sanity: the table we installed is the one a lookup finds. */
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}

/*
 * fixaddr_init - populate page tables for the fixmap region: one PMD-sized,
 * PMD-aligned window ending at the last fixed address.
 */
void __init fixaddr_init(void)
{
	unsigned long vaddr;

	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
}