// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif


/*
 * Motorola 680x0 user's manual recommends using uncached memory for address
 * translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
	__flush_page_to_ram(page);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}

void mmu_page_dtor(void *page)
{
	cache_page(page);
}

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)

static const int ptable_shift[2] = {
	7+2, /* PGD, PMD */
	6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)

void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));
}
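
/*
 * Illustration of the mark-bit scheme above (numbers assume 4 KiB pages):
 * a page holding PTE tables is carved into 4096/256 = 16 slots, a page
 * holding PGD/PMD pointer tables into 4096/512 = 8 slots, and bit i of
 * PD_MARKBITS() is set while slot i (at byte offset i * ptable_size(type))
 * is free.  E.g. with PD_MARKBITS() == 0xfffd on a PTE-table page, only
 * slot 1 is in use; get_pointer_table(TABLE_PTE) would hand out slot 0
 * and clear bit 0.
 */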
void *get_pointer_table(int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a table
	 * is taken from a page allocated for the purpose.  With 4 KiB
	 * pages, each such page holds 8 pointer tables (PGD/PMD) or 16
	 * page tables (PTE), depending on @type.  The page is remapped
	 * in virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		if (type == TABLE_PTE) {
			/*
			 * m68k doesn't have SPLIT_PTE_PTLOCKS because it
			 * doesn't have SMP.
			 */
			pgtable_pte_page_ctor(virt_to_page(page));
		}

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return page_address(PD_PAGE(dp)) + off;
}

int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (PD_MARKBITS(dp) & mask)
		panic("table already free!");

	PD_MARKBITS(dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		if (type == TABLE_PTE)
			pgtable_pte_page_dtor(virt_to_page(page));
		free_page(page);
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}
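
/*
 * Usage sketch, for illustration only (the real callers live in
 * arch/m68k/include/asm/motorola_pgalloc.h, where the TABLE_PGD/TABLE_PTE
 * type indices are defined):
 *
 *	pmd_t *pmd = get_pointer_table(TABLE_PGD);	// 512-byte table
 *	pte_t *pte = get_pointer_table(TABLE_PTE);	// 256-byte table
 *	...
 *	free_pointer_table(pte, TABLE_PTE);
 *	free_pointer_table(pmd, TABLE_PGD);
 *
 * free_pointer_table() returns 1 when the backing page became entirely
 * free and was handed back to the page allocator, 0 otherwise.
 */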

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
		pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}

static pmd_t *last_pmd_table __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
	if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
		last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
							     PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}

static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
			       virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
				int i;
#ifdef DEBUG
				printk("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}
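
/*
 * Illustration of map_node()'s strategy above (numbers assume the classic
 * 4 KiB-page layout, where PMD_SIZE is 256 KiB and PGDIR_SIZE is 32 MiB):
 * the '020/'030 MMUs support "early termination" descriptors, so a
 * suitably aligned 32 MiB run is mapped by a single root-table entry and
 * a 256 KiB run by a single pointer-table entry, with no PTE pages at
 * all.  Only the block at virtual address 0 is broken into real PTEs, so
 * that page 0 can be left unmapped and NULL dereferences fault.  The
 * '040/'060 have no early termination descriptors, hence the full PTE
 * walk in the else branch.
 */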

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/m68k/kernel/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	/*
	 * Allocate the zero page, which the VM layer hands out to back
	 * zero-filled read-only mappings.
	 */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);
	}
}
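
/*
 * Worked example for the m68k_virt_to_node_shift computation in
 * paging_init() (illustrative numbers only): with memory chunks spanning
 * min_addr = 0 to max_addr = 256 MiB, fls(0x0fffffff) = 28, so the shift
 * becomes 22 and each virt-to-node lookup entry covers a 4 MiB (1 << 22)
 * slice; at most 64 (1 << 6) entries are ever needed for the span, which
 * is what the "- 6" sizes the table for.
 */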