/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON3:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

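/*
 * Flush the TLB entries covering [start, end) in vma's address space on
 * this CPU.  Small ranges are handled by probing and invalidating each
 * (even, odd) page pair individually; once the range spans more pairs
 * than half the TLB (an eighth when an FTLB is present), it is cheaper
 * to drop the whole context and allocate a new ASID instead.
 */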
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_micro_tlb();
	local_irq_restore(flags);
}

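/*
 * Flush the single TLB entry (an even/odd page pair) that maps 'page'
 * in vma's address space on this CPU.  Nothing to do if the mm has no
 * live ASID on this CPU.
 */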
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		htw_stop();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb_vm(vma);
	local_irq_restore(flags);
}

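/*
 * Install a wired TLB entry.  Slots below the c0_wired index are excluded
 * from random replacement, so the mapping remains throughout the lifetime
 * of the system (see add_temporary_entry() below for the boot-time
 * alternative that does not stay wired).
 */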
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

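/*
 * Probe once whether the MMU accepts the huge page size: write
 * PM_HUGE_MASK to c0_pagemask and check that the value sticks.  The
 * result is cached in 'mask' after the first call.
 */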
int has_transparent_hugepage(void)
{
	static unsigned int mask = -1;

	if (mask == -1) {	/* first call comes during __init */
		unsigned long flags;

		local_irq_save(flags);
		write_c0_pagemask(PM_HUGE_MASK);
		back_to_back_c0_hazard();
		mask = read_c0_pagemask();
		write_c0_pagemask(PM_DEFAULT_MASK);
		local_irq_restore(flags);
	}
	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

int temp_tlb_entry;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}

static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	*/
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);