/*
 * Modifications by Matt Porter (mporter@mvista.com) to support
 * PPC44x Book E processors.
 *
 * This file contains the routines for initializing the MMU
 * on the 4xx series of chips.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/memblock.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>

#include <mm/mmu_decl.h>

/* Used by the 44x TLB replacement exception handler.
 * It just needs to be declared somewhere.
 */
unsigned int tlb_44x_index; /* = 0 */
unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
int icache_44x_need_flush;

unsigned long tlb_47x_boltmap[1024/8];

static void ppc44x_update_tlb_hwater(void)
{
	/* The TLB miss handlers hard-code the watermark in a cmpli
	 * instruction to improve performance, rather than loading it
	 * from this global variable. Thus, we patch that instruction
	 * in both TLB miss handlers whenever the value is updated.
	 */
	modify_instruction_site(&patch__tlb_44x_hwater_D, 0xffff, tlb_44x_hwater);
	modify_instruction_site(&patch__tlb_44x_hwater_I, 0xffff, tlb_44x_hwater);
}

/*
 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
 */
static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
{
	unsigned int entry = tlb_44x_hwater--;

	ppc44x_update_tlb_hwater();

	mtspr(SPRN_MMUCR, 0);

	__asm__ __volatile__(
		"tlbwe	%2,%3,%4\n"
		"tlbwe	%1,%3,%5\n"
		"tlbwe	%0,%3,%6\n"
	:
	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
	  "r" (phys),
	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
	  "r" (entry),
	  "i" (PPC44x_TLB_PAGEID),
	  "i" (PPC44x_TLB_XLAT),
	  "i" (PPC44x_TLB_ATTRIB));
}
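/*
 * Editor's note -- a sketch of how the 47x bolted-entry bookkeeping
 * below hangs together, inferred from this code rather than from the
 * 476 documentation: MMUBE0 carries the valid bits and 8-bit TLB
 * indices for bolted slots 0-2, and MMUBE1 the same for slots 3-5.
 * ppc47x_find_free_bolted() returns the first slot whose valid bit is
 * clear (or -1 if all six are taken), and ppc47x_update_boltmap()
 * mirrors each valid slot's index into tlb_47x_boltmap, presumably so
 * the TLB miss handler can avoid evicting bolted entries.
 */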
static int __init ppc47x_find_free_bolted(void)
{
	unsigned int mmube0 = mfspr(SPRN_MMUBE0);
	unsigned int mmube1 = mfspr(SPRN_MMUBE1);

	if (!(mmube0 & MMUBE0_VBE0))
		return 0;
	if (!(mmube0 & MMUBE0_VBE1))
		return 1;
	if (!(mmube0 & MMUBE0_VBE2))
		return 2;
	if (!(mmube1 & MMUBE1_VBE3))
		return 3;
	if (!(mmube1 & MMUBE1_VBE4))
		return 4;
	if (!(mmube1 & MMUBE1_VBE5))
		return 5;
	return -1;
}

static void __init ppc47x_update_boltmap(void)
{
	unsigned int mmube0 = mfspr(SPRN_MMUBE0);
	unsigned int mmube1 = mfspr(SPRN_MMUBE1);

	if (mmube0 & MMUBE0_VBE0)
		__set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube0 & MMUBE0_VBE1)
		__set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube0 & MMUBE0_VBE2)
		__set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube1 & MMUBE1_VBE3)
		__set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube1 & MMUBE1_VBE4)
		__set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube1 & MMUBE1_VBE5)
		__set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
			  tlb_47x_boltmap);
}

/*
 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
 */
static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
	unsigned int rA;
	int bolted;

	/* Base rA is HW way select, way 0, bolted bit set */
	rA = 0x88000000;

	/* Look for a bolted entry slot */
	bolted = ppc47x_find_free_bolted();
	BUG_ON(bolted < 0);

	/* Insert bolted slot number */
	rA |= bolted << 24;

	pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
		 virt, phys, bolted);

	mtspr(SPRN_MMUCR, 0);

	__asm__ __volatile__(
		"tlbwe	%2,%3,0\n"
		"tlbwe	%1,%3,1\n"
		"tlbwe	%0,%3,2\n"
		:
		: "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
		       PPC47x_TLB2_SX
#ifdef CONFIG_SMP
		       | PPC47x_TLB2_M
#endif
		       ),
		  "r" (phys),
		  "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
		  "r" (rA));
}

void __init MMU_init_hw(void)
{
	/* This is not useful on 47x but won't hurt either */
	ppc44x_update_tlb_hwater();

	flush_instruction_cache();
}

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long addr;
	unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);

	/* Pin in enough TLBs to cover any lowmem not covered by the
	 * initial 256M mapping established in head_44x.S */
	for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
	     addr += PPC_PIN_SIZE) {
		if (mmu_has_feature(MMU_FTR_TYPE_47x))
			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
		else
			ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
	}
	if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		ppc47x_update_boltmap();

#ifdef DEBUG
		{
			int i;

			printk(KERN_DEBUG "bolted entries: ");
			for (i = 0; i < 256; i++) {
				if (test_bit(i, tlb_47x_boltmap))
					pr_cont("%d ", i);
			}
			pr_cont("\n");
		}
#endif /* DEBUG */
	}
	return total_lowmem;
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	u64 size;

#ifndef CONFIG_NONSTATIC_KERNEL
	/* We currently require the first MEMBLOCK to map physical
	 * address 0 on these processors
	 */
	BUG_ON(first_memblock_base != 0);
#endif

	/* 44x has a 256M TLB entry pinned at boot */
	size = min_t(u64, first_memblock_size, PPC_PIN_SIZE);
	memblock_set_current_limit(first_memblock_base + size);
}

#ifdef CONFIG_SMP
void __init mmu_init_secondary(int cpu)
{
	unsigned long addr;
	unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);

	/* Pin in enough TLBs to cover any lowmem not covered by the
	 * initial 256M mapping established in head_44x.S
	 *
	 * WARNING: This is called with only the first 256M of the
	 * linear mapping in the TLB and we can't take faults yet
	 * so beware of what this code uses. It runs off a temporary
	 * stack. current (r2) isn't initialized, smp_processor_id()
	 * will not work, current thread info isn't accessible, ...
	 */
	for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
	     addr += PPC_PIN_SIZE) {
		if (mmu_has_feature(MMU_FTR_TYPE_47x))
			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
		else
			ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
	}
}
#endif /* CONFIG_SMP */
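/*
 * Editor's note -- a worked example of the pinning loops above under
 * assumed values (not taken from the original file): with
 * PPC_PIN_SIZE = 256MB, memstart_addr = 0 and lowmem_end_addr at
 * 0x30000000 (768MB), head_44x.S has already mapped the first 256MB,
 * so mmu_mapin_ram() and mmu_init_secondary() each pin two further
 * entries:
 *
 *	ppc44x_pin_tlb(PAGE_OFFSET + 0x10000000, 0x10000000);
 *	ppc44x_pin_tlb(PAGE_OFFSET + 0x20000000, 0x20000000);
 *
 * (or the ppc47x_pin_tlb() variant on a 47x). On 44x each call also
 * lowers tlb_44x_hwater and re-patches the miss handlers, keeping
 * TLB replacement away from the newly pinned slots.
 */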