/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * Loongson-2/3 have a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software.
 */
static inline void flush_itlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
		write_c0_diag(4);
		break;
	default:
		break;
	}
}

static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_itlb();
}

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_itlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
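/*
 * A rough sketch of what "bumping the asid" below amounts to, assuming
 * the generation-counter scheme in asm/mmu_context.h (the real
 * drop_mmu_context() also reloads EntryHi when mm is active on this
 * CPU, and simply zeroes cpu_context() otherwise):
 *
 *	asid = asid_cache(cpu) + ASID_INC;
 *	if (!(asid & ASID_MASK)) {		// ASID field wrapped
 *		local_flush_tlb_all();		// start a new asid cycle
 *		if (!asid)			// fix version if needed
 *			asid = ASID_FIRST_VERSION;
 *	}
 *	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 *
 * Stale entries tagged with a recycled ASID thus never outlive a wrap.
 */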
/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_itlb();
		local_irq_restore(flags);
	}
}
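/*
 * Flush a range of kernel (global) mappings.  The same cost heuristic
 * as above applies: probe and invalidate per even/odd page pair while
 * the range is small relative to the TLB, otherwise just flush
 * everything.  The thresholds (tlbsize/8 with an FTLB, tlbsize/2
 * without) are a rough estimate of when per-page probing stops being
 * cheaper than a full flush.
 */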
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_itlb();
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		htw_stop();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		htw_start();
		flush_itlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_itlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_itlb_vm(vma);
	local_irq_restore(flags);
}
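/*
 * Install a wired (never randomly replaced) TLB entry from raw
 * EntryLo0/1, EntryHi and PageMask values, e.g. for fixed device
 * windows.  The wired count is raised first so that the slot the
 * entry is written into is excluded from random replacement.
 */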
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	local_irq_save(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	local_irq_restore(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

int temp_tlb_entry;

int __init add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}

static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
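/*
 * Example: booting with "ntlb=16" makes tlb_init() below wire off all
 * but 16 entries, so only those 16 take part in random replacement.
 * This is a debugging aid; values outside 2..tlbsize are ignored.
 */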
/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	 */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);