/*
 * linux/arch/m68k/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}

static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}
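
/*
 * Map one contiguous chunk of physical memory into the kernel virtual
 * address space, starting at the static virtaddr cursor (initially
 * PAGE_OFFSET).  On the 020/030 this uses early-termination
 * descriptors where alignment and size allow: a single root-table
 * entry can map 32MB (ROOTTREESIZE) and a single pointer-table entry
 * 256KB (PTRTREESIZE).  On the 040/060 full three-level tables are
 * built down to individual pages.  In either case the page at
 * virtual address 0 is left invalid, so NULL dereferences fault.
 * Returns the virtual address just past the newly mapped chunk.
 */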
static unsigned long __init
map_chunk (unsigned long addr, long size)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	static unsigned long virtaddr = PAGE_OFFSET;
	unsigned long physaddr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	physaddr = (addr | m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif

	return virtaddr;
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	int chunk;
	unsigned long mem_avail = 0;
	unsigned long zones_size[3] = { 0, };

#ifdef DEBUG
	{
		extern unsigned long availmem;
		printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
			kernel_pg_dir, availmem, start_mem, end_mem);
	}
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space.  It may allocate some memory for page
	 * tables and thus modify availmem.
	 */

	for (chunk = 0; chunk < m68k_num_memory; chunk++) {
		mem_avail = map_chunk (m68k_memory[chunk].addr,
				       m68k_memory[chunk].size);
	}

	flush_tlb_all();
#ifdef DEBUG
	printk ("memory available is %ldKB\n", mem_avail >> 10);
	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	zones_size[0] = (mach_max_dma_address < (unsigned long)high_memory ?
			 (mach_max_dma_address+1) : (unsigned long)high_memory);
	zones_size[1] = (unsigned long)high_memory - zones_size[0];

	zones_size[0] = (zones_size[0] - PAGE_OFFSET) >> PAGE_SHIFT;
	zones_size[1] >>= PAGE_SHIFT;

	free_area_init(zones_size);
}

extern char __init_begin, __init_end;

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)&__init_begin;
	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
}
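
#if 0
/*
 * Illustrative sketch only, hence not compiled in: walk the kernel
 * page tables with the same accessors map_chunk() uses above and
 * report how a kernel virtual address translates.  dump_kmap() is a
 * hypothetical debugging helper, not part of this file's interface;
 * it assumes the full three-level layout built for the 040/060 and
 * does not decode the early-termination descriptors used on the
 * 020/030.
 */
static void dump_kmap(unsigned long virtaddr)
{
	pgd_t *pgd_dir = pgd_offset_k(virtaddr);
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (!pgd_present(*pgd_dir)) {
		printk("va %#lx: no pointer table\n", virtaddr);
		return;
	}
	pmd_dir = pmd_offset(pgd_dir, virtaddr);
	if (!pmd_present(*pmd_dir)) {
		printk("va %#lx: no page table\n", virtaddr);
		return;
	}
	pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
	if (!pte_present(*pte_dir)) {
		printk("va %#lx: not mapped\n", virtaddr);
		return;
	}
	printk("va %#lx: pte %#lx\n", virtaddr, pte_val(*pte_dir));
}
#endif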