// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the ColdFire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	int i;

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
	if (!next_pgtable)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, PAGE_SIZE);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* populate the page table: one pte per kernel virtual page,
		 * cleared once the address passes high_memory */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn((void *)address),
					    PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;
	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
	free_area_init(max_zone_pfn);
}
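
/*
 * Handle a TLB miss exception. Look up the faulting virtual address in
 * the page tables of the current (or kernel) mm and, when a valid and
 * present pte is found, load a matching entry into the hardware TLB
 * through the MMUTR/MMUDR/MMUOR registers. Returns 0 on success, or -1
 * so the caller falls back to the generic fault handler (no mapping,
 * or a write to a write-protected page).
 *
 * For a data miss (dtlb set) the faulting address is read back from the
 * MMUAR register; for an instruction fetch miss it is reconstructed
 * from the exception PC plus the instruction's extension words.
 */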
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	p4d = p4d_offset(pgd, mmuar);
	if (p4d_none(*p4d)) {
		local_irq_restore(flags);
		return -1;
	}

	pud = pud_offset(p4d, mmuar);
	if (pud_none(*pud)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pud, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}

void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page frame number limits of usable RAM */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *	-- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}
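
/*
 * Page protections for each combination of the VM_READ, VM_WRITE,
 * VM_EXEC and VM_SHARED vma flags, indexed by
 * (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)).
 * DECLARE_VM_GET_PAGE_PROT below (from linux/mm.h) expands to the
 * vm_get_page_prot() implementation doing exactly that table lookup,
 * roughly:
 *
 *	pgprot_t vm_get_page_prot(unsigned long vm_flags)
 *	{
 *		return protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *	}
 *	EXPORT_SYMBOL(vm_get_page_prot);
 */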
static const pgprot_t protection_map[16] = {
	[VM_NONE]				= PAGE_NONE,
	[VM_READ]				= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_READABLE),
	[VM_WRITE]				= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_WRITABLE),
	[VM_WRITE | VM_READ]			= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_READABLE |
							   CF_PAGE_WRITABLE),
	[VM_EXEC]				= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_EXEC),
	[VM_EXEC | VM_READ]			= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_READABLE |
							   CF_PAGE_EXEC),
	[VM_EXEC | VM_WRITE]			= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_WRITABLE |
							   CF_PAGE_EXEC),
	[VM_EXEC | VM_WRITE | VM_READ]		= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_READABLE |
							   CF_PAGE_WRITABLE |
							   CF_PAGE_EXEC),
	[VM_SHARED]				= PAGE_NONE,
	[VM_SHARED | VM_READ]			= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_READABLE),
	[VM_SHARED | VM_WRITE]			= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]	= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_READABLE |
							   CF_PAGE_SHARED),
	[VM_SHARED | VM_EXEC]			= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_READ]		= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_READABLE |
							   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_WRITE]	= __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_SHARED |
							   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID |
							   CF_PAGE_ACCESSED |
							   CF_PAGE_READABLE |
							   CF_PAGE_SHARED |
							   CF_PAGE_EXEC)
};
DECLARE_VM_GET_PAGE_PROT