/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
 * unfortunately, itlb is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		ENTER_CRITICAL(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		EXIT_CRITICAL(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		EXIT_CRITICAL(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needed fixups.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
}

void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	ENTER_CRITICAL(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	EXIT_CRITICAL(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

void __cpuinit tlb_init(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large virtual
		 * address.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		write_c0_pagegrain(pg);
	}

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}
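
/*
 * Illustrative sketch only: a hypothetical board-setup routine showing how
 * add_wired_entry() above is typically used to pin a fixed, uncached device
 * mapping in the TLB.  The function name, the physical address and the choice
 * of CKSEG2/PM_16M are assumptions made up for the example, not code from
 * this file.
 *
 *	static void __init my_board_wire_io(void)
 *	{
 *		unsigned long lo0;
 *
 *		// EntryLo layout: PFN from bit 6 upward, cache attribute
 *		// (2 = uncached) in bits 5..3, then Dirty, Valid and Global.
 *		lo0 = (0x1f000000 >> 6) | (2 << 3) | 0x7;
 *
 *		// Wire a 16 MB window at CKSEG2; the odd page of the VPN2
 *		// pair is left invalid (entrylo1 == 0).
 *		add_wired_entry(lo0, 0, CKSEG2, PM_16M);
 *	}
 */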