1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Synthesize TLB refill handlers at runtime. 7 * 8 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer 9 * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki 10 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) 11 * Copyright (C) 2008, 2009 Cavium Networks, Inc. 12 * Copyright (C) 2011 MIPS Technologies, Inc. 13 * 14 * ... and the days got worse and worse and now you see 15 * I've gone completly out of my mind. 16 * 17 * They're coming to take me a away haha 18 * they're coming to take me a away hoho hihi haha 19 * to the funny farm where code is beautiful all the time ... 20 * 21 * (Condolences to Napoleon XIV) 22 */ 23 24 #include <linux/bug.h> 25 #include <linux/kernel.h> 26 #include <linux/types.h> 27 #include <linux/smp.h> 28 #include <linux/string.h> 29 #include <linux/init.h> 30 #include <linux/cache.h> 31 32 #include <asm/mmu_context.h> 33 #include <asm/cacheflush.h> 34 #include <asm/pgtable.h> 35 #include <asm/war.h> 36 #include <asm/uasm.h> 37 #include <asm/setup.h> 38 39 /* 40 * TLB load/store/modify handlers. 41 * 42 * Only the fastpath gets synthesized at runtime, the slowpath for 43 * do_page_fault remains normal asm. 44 */ 45 extern void tlb_do_page_fault_0(void); 46 extern void tlb_do_page_fault_1(void); 47 48 struct work_registers { 49 int r1; 50 int r2; 51 int r3; 52 }; 53 54 struct tlb_reg_save { 55 unsigned long a; 56 unsigned long b; 57 } ____cacheline_aligned_in_smp; 58 59 static struct tlb_reg_save handler_reg_save[NR_CPUS]; 60 61 static inline int r45k_bvahwbug(void) 62 { 63 /* XXX: We should probe for the presence of this bug, but we don't. */ 64 return 0; 65 } 66 67 static inline int r4k_250MHZhwbug(void) 68 { 69 /* XXX: We should probe for the presence of this bug, but we don't. */ 70 return 0; 71 } 72 73 static inline int __maybe_unused bcm1250_m3_war(void) 74 { 75 return BCM1250_M3_WAR; 76 } 77 78 static inline int __maybe_unused r10000_llsc_war(void) 79 { 80 return R10000_LLSC_WAR; 81 } 82 83 static int use_bbit_insns(void) 84 { 85 switch (current_cpu_type()) { 86 case CPU_CAVIUM_OCTEON: 87 case CPU_CAVIUM_OCTEON_PLUS: 88 case CPU_CAVIUM_OCTEON2: 89 return 1; 90 default: 91 return 0; 92 } 93 } 94 95 static int use_lwx_insns(void) 96 { 97 switch (current_cpu_type()) { 98 case CPU_CAVIUM_OCTEON2: 99 return 1; 100 default: 101 return 0; 102 } 103 } 104 #if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \ 105 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 106 static bool scratchpad_available(void) 107 { 108 return true; 109 } 110 static int scratchpad_offset(int i) 111 { 112 /* 113 * CVMSEG starts at address -32768 and extends for 114 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines. 115 */ 116 i += 1; /* Kernel use starts at the top and works down. */ 117 return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768; 118 } 119 #else 120 static bool scratchpad_available(void) 121 { 122 return false; 123 } 124 static int scratchpad_offset(int i) 125 { 126 BUG(); 127 /* Really unreachable, but evidently some GCC want this. */ 128 return 0; 129 } 130 #endif 131 /* 132 * Found by experiment: At least some revisions of the 4kc throw under 133 * some circumstances a machine check exception, triggered by invalid 134 * values in the index register. Delaying the tlbp instruction until 135 * after the next branch, plus adding an additional nop in front of 136 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows 137 * why; it's not an issue caused by the core RTL. 138 * 139 */ 140 static int __cpuinit m4kc_tlbp_war(void) 141 { 142 return (current_cpu_data.processor_id & 0xffff00) == 143 (PRID_COMP_MIPS | PRID_IMP_4KC); 144 } 145 146 /* Handle labels (which must be positive integers). */ 147 enum label_id { 148 label_second_part = 1, 149 label_leave, 150 label_vmalloc, 151 label_vmalloc_done, 152 label_tlbw_hazard_0, 153 label_split = label_tlbw_hazard_0 + 8, 154 label_tlbl_goaround1, 155 label_tlbl_goaround2, 156 label_nopage_tlbl, 157 label_nopage_tlbs, 158 label_nopage_tlbm, 159 label_smp_pgtable_change, 160 label_r3000_write_probe_fail, 161 label_large_segbits_fault, 162 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 163 label_tlb_huge_update, 164 #endif 165 }; 166 167 UASM_L_LA(_second_part) 168 UASM_L_LA(_leave) 169 UASM_L_LA(_vmalloc) 170 UASM_L_LA(_vmalloc_done) 171 /* _tlbw_hazard_x is handled differently. */ 172 UASM_L_LA(_split) 173 UASM_L_LA(_tlbl_goaround1) 174 UASM_L_LA(_tlbl_goaround2) 175 UASM_L_LA(_nopage_tlbl) 176 UASM_L_LA(_nopage_tlbs) 177 UASM_L_LA(_nopage_tlbm) 178 UASM_L_LA(_smp_pgtable_change) 179 UASM_L_LA(_r3000_write_probe_fail) 180 UASM_L_LA(_large_segbits_fault) 181 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 182 UASM_L_LA(_tlb_huge_update) 183 #endif 184 185 static int __cpuinitdata hazard_instance; 186 187 static void __cpuinit uasm_bgezl_hazard(u32 **p, 188 struct uasm_reloc **r, 189 int instance) 190 { 191 switch (instance) { 192 case 0 ... 7: 193 uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance); 194 return; 195 default: 196 BUG(); 197 } 198 } 199 200 static void __cpuinit uasm_bgezl_label(struct uasm_label **l, 201 u32 **p, 202 int instance) 203 { 204 switch (instance) { 205 case 0 ... 7: 206 uasm_build_label(l, *p, label_tlbw_hazard_0 + instance); 207 break; 208 default: 209 BUG(); 210 } 211 } 212 213 /* 214 * pgtable bits are assigned dynamically depending on processor feature 215 * and statically based on kernel configuration. This spits out the actual 216 * values the kernel is using. Required to make sense from disassembled 217 * TLB exception handlers. 218 */ 219 static void output_pgtable_bits_defines(void) 220 { 221 #define pr_define(fmt, ...) \ 222 pr_debug("#define " fmt, ##__VA_ARGS__) 223 224 pr_debug("#include <asm/asm.h>\n"); 225 pr_debug("#include <asm/regdef.h>\n"); 226 pr_debug("\n"); 227 228 pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT); 229 pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT); 230 pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT); 231 pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT); 232 pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT); 233 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 234 pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT); 235 pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT); 236 #endif 237 if (cpu_has_rixi) { 238 #ifdef _PAGE_NO_EXEC_SHIFT 239 pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); 240 #endif 241 #ifdef _PAGE_NO_READ_SHIFT 242 pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); 243 #endif 244 } 245 pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); 246 pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT); 247 pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT); 248 pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT); 249 pr_debug("\n"); 250 } 251 252 static inline void dump_handler(const char *symbol, const u32 *handler, int count) 253 { 254 int i; 255 256 pr_debug("LEAF(%s)\n", symbol); 257 258 pr_debug("\t.set push\n"); 259 pr_debug("\t.set noreorder\n"); 260 261 for (i = 0; i < count; i++) 262 pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]); 263 264 pr_debug("\t.set\tpop\n"); 265 266 pr_debug("\tEND(%s)\n", symbol); 267 } 268 269 /* The only general purpose registers allowed in TLB handlers. */ 270 #define K0 26 271 #define K1 27 272 273 /* Some CP0 registers */ 274 #define C0_INDEX 0, 0 275 #define C0_ENTRYLO0 2, 0 276 #define C0_TCBIND 2, 2 277 #define C0_ENTRYLO1 3, 0 278 #define C0_CONTEXT 4, 0 279 #define C0_PAGEMASK 5, 0 280 #define C0_BADVADDR 8, 0 281 #define C0_ENTRYHI 10, 0 282 #define C0_EPC 14, 0 283 #define C0_XCONTEXT 20, 0 284 285 #ifdef CONFIG_64BIT 286 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) 287 #else 288 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT) 289 #endif 290 291 /* The worst case length of the handler is around 18 instructions for 292 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs. 293 * Maximum space available is 32 instructions for R3000 and 64 294 * instructions for R4000. 295 * 296 * We deliberately chose a buffer size of 128, so we won't scribble 297 * over anything important on overflow before we panic. 298 */ 299 static u32 tlb_handler[128] __cpuinitdata; 300 301 /* simply assume worst case size for labels and relocs */ 302 static struct uasm_label labels[128] __cpuinitdata; 303 static struct uasm_reloc relocs[128] __cpuinitdata; 304 305 #ifdef CONFIG_64BIT 306 static int check_for_high_segbits __cpuinitdata; 307 #endif 308 309 static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop, 310 unsigned int i_const) 311 { 312 unsigned int **p; 313 314 for (p = start; p < stop; p++) { 315 #ifndef CONFIG_CPU_MICROMIPS 316 unsigned int *ip; 317 318 ip = *p; 319 *ip = (*ip & 0xffff0000) | i_const; 320 #else 321 unsigned short *ip; 322 323 ip = ((unsigned short *)((unsigned int)*p - 1)); 324 if ((*ip & 0xf000) == 0x4000) { 325 *ip &= 0xfff1; 326 *ip |= (i_const << 1); 327 } else if ((*ip & 0xf000) == 0x6000) { 328 *ip &= 0xfff1; 329 *ip |= ((i_const >> 2) << 1); 330 } else { 331 ip++; 332 *ip = i_const; 333 } 334 #endif 335 local_flush_icache_range((unsigned long)ip, 336 (unsigned long)ip + sizeof(*ip)); 337 } 338 } 339 340 #define asid_insn_fixup(section, const) \ 341 do { \ 342 extern unsigned int *__start_ ## section; \ 343 extern unsigned int *__stop_ ## section; \ 344 insn_fixup(&__start_ ## section, &__stop_ ## section, const); \ 345 } while(0) 346 347 /* 348 * Caller is assumed to flush the caches before the first context switch. 349 */ 350 static void __cpuinit setup_asid(unsigned int inc, unsigned int mask, 351 unsigned int version_mask, 352 unsigned int first_version) 353 { 354 extern asmlinkage void handle_ri_rdhwr_vivt(void); 355 unsigned long *vivt_exc; 356 357 #ifdef CONFIG_CPU_MICROMIPS 358 /* 359 * Worst case optimised microMIPS addiu instructions support 360 * only a 3-bit immediate value. 361 */ 362 if(inc > 7) 363 panic("Invalid ASID increment value!"); 364 #endif 365 asid_insn_fixup(__asid_inc, inc); 366 asid_insn_fixup(__asid_mask, mask); 367 asid_insn_fixup(__asid_version_mask, version_mask); 368 asid_insn_fixup(__asid_first_version, first_version); 369 370 /* Patch up the 'handle_ri_rdhwr_vivt' handler. */ 371 vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt; 372 #ifdef CONFIG_CPU_MICROMIPS 373 vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1); 374 #endif 375 vivt_exc++; 376 *vivt_exc = (*vivt_exc & ~mask) | mask; 377 378 current_cpu_data.asid_cache = first_version; 379 } 380 381 static int check_for_high_segbits __cpuinitdata; 382 383 static unsigned int kscratch_used_mask __cpuinitdata; 384 385 static int __cpuinit allocate_kscratch(void) 386 { 387 int r; 388 unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask; 389 390 r = ffs(a); 391 392 if (r == 0) 393 return -1; 394 395 r--; /* make it zero based */ 396 397 kscratch_used_mask |= (1 << r); 398 399 return r; 400 } 401 402 static int scratch_reg __cpuinitdata; 403 static int pgd_reg __cpuinitdata; 404 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; 405 406 static struct work_registers __cpuinit build_get_work_registers(u32 **p) 407 { 408 struct work_registers r; 409 410 int smp_processor_id_reg; 411 int smp_processor_id_sel; 412 int smp_processor_id_shift; 413 414 if (scratch_reg > 0) { 415 /* Save in CPU local C0_KScratch? */ 416 UASM_i_MTC0(p, 1, 31, scratch_reg); 417 r.r1 = K0; 418 r.r2 = K1; 419 r.r3 = 1; 420 return r; 421 } 422 423 if (num_possible_cpus() > 1) { 424 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT 425 smp_processor_id_shift = 51; 426 smp_processor_id_reg = 20; /* XContext */ 427 smp_processor_id_sel = 0; 428 #else 429 # ifdef CONFIG_32BIT 430 smp_processor_id_shift = 25; 431 smp_processor_id_reg = 4; /* Context */ 432 smp_processor_id_sel = 0; 433 # endif 434 # ifdef CONFIG_64BIT 435 smp_processor_id_shift = 26; 436 smp_processor_id_reg = 4; /* Context */ 437 smp_processor_id_sel = 0; 438 # endif 439 #endif 440 /* Get smp_processor_id */ 441 UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel); 442 UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift); 443 444 /* handler_reg_save index in K0 */ 445 UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); 446 447 UASM_i_LA(p, K1, (long)&handler_reg_save); 448 UASM_i_ADDU(p, K0, K0, K1); 449 } else { 450 UASM_i_LA(p, K0, (long)&handler_reg_save); 451 } 452 /* K0 now points to save area, save $1 and $2 */ 453 UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); 454 UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0); 455 456 r.r1 = K1; 457 r.r2 = 1; 458 r.r3 = 2; 459 return r; 460 } 461 462 static void __cpuinit build_restore_work_registers(u32 **p) 463 { 464 if (scratch_reg > 0) { 465 UASM_i_MFC0(p, 1, 31, scratch_reg); 466 return; 467 } 468 /* K0 already points to save area, restore $1 and $2 */ 469 UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); 470 UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); 471 } 472 473 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 474 475 /* 476 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current, 477 * we cannot do r3000 under these circumstances. 478 * 479 * Declare pgd_current here instead of including mmu_context.h to avoid type 480 * conflicts for tlbmiss_handler_setup_pgd 481 */ 482 extern unsigned long pgd_current[]; 483 484 /* 485 * The R3000 TLB handler is simple. 486 */ 487 static void __cpuinit build_r3000_tlb_refill_handler(void) 488 { 489 long pgdc = (long)pgd_current; 490 u32 *p; 491 492 memset(tlb_handler, 0, sizeof(tlb_handler)); 493 p = tlb_handler; 494 495 uasm_i_mfc0(&p, K0, C0_BADVADDR); 496 uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ 497 uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); 498 uasm_i_srl(&p, K0, K0, 22); /* load delay */ 499 uasm_i_sll(&p, K0, K0, 2); 500 uasm_i_addu(&p, K1, K1, K0); 501 uasm_i_mfc0(&p, K0, C0_CONTEXT); 502 uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ 503 uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ 504 uasm_i_addu(&p, K1, K1, K0); 505 uasm_i_lw(&p, K0, 0, K1); 506 uasm_i_nop(&p); /* load delay */ 507 uasm_i_mtc0(&p, K0, C0_ENTRYLO0); 508 uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ 509 uasm_i_tlbwr(&p); /* cp0 delay */ 510 uasm_i_jr(&p, K1); 511 uasm_i_rfe(&p); /* branch delay */ 512 513 if (p > tlb_handler + 32) 514 panic("TLB refill handler space exceeded"); 515 516 pr_debug("Wrote TLB refill handler (%u instructions).\n", 517 (unsigned int)(p - tlb_handler)); 518 519 memcpy((void *)ebase, tlb_handler, 0x80); 520 521 dump_handler("r3000_tlb_refill", (u32 *)ebase, 32); 522 } 523 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ 524 525 /* 526 * The R4000 TLB handler is much more complicated. We have two 527 * consecutive handler areas with 32 instructions space each. 528 * Since they aren't used at the same time, we can overflow in the 529 * other one.To keep things simple, we first assume linear space, 530 * then we relocate it to the final handler layout as needed. 531 */ 532 static u32 final_handler[64] __cpuinitdata; 533 534 /* 535 * Hazards 536 * 537 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0: 538 * 2. A timing hazard exists for the TLBP instruction. 539 * 540 * stalling_instruction 541 * TLBP 542 * 543 * The JTLB is being read for the TLBP throughout the stall generated by the 544 * previous instruction. This is not really correct as the stalling instruction 545 * can modify the address used to access the JTLB. The failure symptom is that 546 * the TLBP instruction will use an address created for the stalling instruction 547 * and not the address held in C0_ENHI and thus report the wrong results. 548 * 549 * The software work-around is to not allow the instruction preceding the TLBP 550 * to stall - make it an NOP or some other instruction guaranteed not to stall. 551 * 552 * Errata 2 will not be fixed. This errata is also on the R5000. 553 * 554 * As if we MIPS hackers wouldn't know how to nop pipelines happy ... 555 */ 556 static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p) 557 { 558 switch (current_cpu_type()) { 559 /* Found by experiment: R4600 v2.0/R4700 needs this, too. */ 560 case CPU_R4600: 561 case CPU_R4700: 562 case CPU_R5000: 563 case CPU_NEVADA: 564 uasm_i_nop(p); 565 uasm_i_tlbp(p); 566 break; 567 568 default: 569 uasm_i_tlbp(p); 570 break; 571 } 572 } 573 574 /* 575 * Write random or indexed TLB entry, and care about the hazards from 576 * the preceding mtc0 and for the following eret. 577 */ 578 enum tlb_write_entry { tlb_random, tlb_indexed }; 579 580 static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, 581 struct uasm_reloc **r, 582 enum tlb_write_entry wmode) 583 { 584 void(*tlbw)(u32 **) = NULL; 585 586 switch (wmode) { 587 case tlb_random: tlbw = uasm_i_tlbwr; break; 588 case tlb_indexed: tlbw = uasm_i_tlbwi; break; 589 } 590 591 if (cpu_has_mips_r2) { 592 /* 593 * The architecture spec says an ehb is required here, 594 * but a number of cores do not have the hazard and 595 * using an ehb causes an expensive pipeline stall. 596 */ 597 switch (current_cpu_type()) { 598 case CPU_M14KC: 599 case CPU_74K: 600 break; 601 602 default: 603 uasm_i_ehb(p); 604 break; 605 } 606 tlbw(p); 607 return; 608 } 609 610 switch (current_cpu_type()) { 611 case CPU_R4000PC: 612 case CPU_R4000SC: 613 case CPU_R4000MC: 614 case CPU_R4400PC: 615 case CPU_R4400SC: 616 case CPU_R4400MC: 617 /* 618 * This branch uses up a mtc0 hazard nop slot and saves 619 * two nops after the tlbw instruction. 620 */ 621 uasm_bgezl_hazard(p, r, hazard_instance); 622 tlbw(p); 623 uasm_bgezl_label(l, p, hazard_instance); 624 hazard_instance++; 625 uasm_i_nop(p); 626 break; 627 628 case CPU_R4600: 629 case CPU_R4700: 630 uasm_i_nop(p); 631 tlbw(p); 632 uasm_i_nop(p); 633 break; 634 635 case CPU_R5000: 636 case CPU_NEVADA: 637 uasm_i_nop(p); /* QED specifies 2 nops hazard */ 638 uasm_i_nop(p); /* QED specifies 2 nops hazard */ 639 tlbw(p); 640 break; 641 642 case CPU_R4300: 643 case CPU_5KC: 644 case CPU_TX49XX: 645 case CPU_PR4450: 646 case CPU_XLR: 647 uasm_i_nop(p); 648 tlbw(p); 649 break; 650 651 case CPU_R10000: 652 case CPU_R12000: 653 case CPU_R14000: 654 case CPU_4KC: 655 case CPU_4KEC: 656 case CPU_M14KC: 657 case CPU_M14KEC: 658 case CPU_SB1: 659 case CPU_SB1A: 660 case CPU_4KSC: 661 case CPU_20KC: 662 case CPU_25KF: 663 case CPU_BMIPS32: 664 case CPU_BMIPS3300: 665 case CPU_BMIPS4350: 666 case CPU_BMIPS4380: 667 case CPU_BMIPS5000: 668 case CPU_LOONGSON2: 669 case CPU_R5500: 670 if (m4kc_tlbp_war()) 671 uasm_i_nop(p); 672 case CPU_ALCHEMY: 673 tlbw(p); 674 break; 675 676 case CPU_RM7000: 677 uasm_i_nop(p); 678 uasm_i_nop(p); 679 uasm_i_nop(p); 680 uasm_i_nop(p); 681 tlbw(p); 682 break; 683 684 case CPU_VR4111: 685 case CPU_VR4121: 686 case CPU_VR4122: 687 case CPU_VR4181: 688 case CPU_VR4181A: 689 uasm_i_nop(p); 690 uasm_i_nop(p); 691 tlbw(p); 692 uasm_i_nop(p); 693 uasm_i_nop(p); 694 break; 695 696 case CPU_VR4131: 697 case CPU_VR4133: 698 case CPU_R5432: 699 uasm_i_nop(p); 700 uasm_i_nop(p); 701 tlbw(p); 702 break; 703 704 case CPU_JZRISC: 705 tlbw(p); 706 uasm_i_nop(p); 707 break; 708 709 default: 710 panic("No TLB refill handler yet (CPU type: %d)", 711 current_cpu_data.cputype); 712 break; 713 } 714 } 715 716 static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p, 717 unsigned int reg) 718 { 719 if (cpu_has_rixi) { 720 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); 721 } else { 722 #ifdef CONFIG_64BIT_PHYS_ADDR 723 uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL)); 724 #else 725 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL)); 726 #endif 727 } 728 } 729 730 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 731 732 static __cpuinit void build_restore_pagemask(u32 **p, 733 struct uasm_reloc **r, 734 unsigned int tmp, 735 enum label_id lid, 736 int restore_scratch) 737 { 738 if (restore_scratch) { 739 /* Reset default page size */ 740 if (PM_DEFAULT_MASK >> 16) { 741 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); 742 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); 743 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 744 uasm_il_b(p, r, lid); 745 } else if (PM_DEFAULT_MASK) { 746 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); 747 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 748 uasm_il_b(p, r, lid); 749 } else { 750 uasm_i_mtc0(p, 0, C0_PAGEMASK); 751 uasm_il_b(p, r, lid); 752 } 753 if (scratch_reg > 0) 754 UASM_i_MFC0(p, 1, 31, scratch_reg); 755 else 756 UASM_i_LW(p, 1, scratchpad_offset(0), 0); 757 } else { 758 /* Reset default page size */ 759 if (PM_DEFAULT_MASK >> 16) { 760 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); 761 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); 762 uasm_il_b(p, r, lid); 763 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 764 } else if (PM_DEFAULT_MASK) { 765 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); 766 uasm_il_b(p, r, lid); 767 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 768 } else { 769 uasm_il_b(p, r, lid); 770 uasm_i_mtc0(p, 0, C0_PAGEMASK); 771 } 772 } 773 } 774 775 static __cpuinit void build_huge_tlb_write_entry(u32 **p, 776 struct uasm_label **l, 777 struct uasm_reloc **r, 778 unsigned int tmp, 779 enum tlb_write_entry wmode, 780 int restore_scratch) 781 { 782 /* Set huge page tlb entry size */ 783 uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); 784 uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); 785 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 786 787 build_tlb_write_entry(p, l, r, wmode); 788 789 build_restore_pagemask(p, r, tmp, label_leave, restore_scratch); 790 } 791 792 /* 793 * Check if Huge PTE is present, if so then jump to LABEL. 794 */ 795 static void __cpuinit 796 build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, 797 unsigned int pmd, int lid) 798 { 799 UASM_i_LW(p, tmp, 0, pmd); 800 if (use_bbit_insns()) { 801 uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid); 802 } else { 803 uasm_i_andi(p, tmp, tmp, _PAGE_HUGE); 804 uasm_il_bnez(p, r, tmp, lid); 805 } 806 } 807 808 static __cpuinit void build_huge_update_entries(u32 **p, 809 unsigned int pte, 810 unsigned int tmp) 811 { 812 int small_sequence; 813 814 /* 815 * A huge PTE describes an area the size of the 816 * configured huge page size. This is twice the 817 * of the large TLB entry size we intend to use. 818 * A TLB entry half the size of the configured 819 * huge page size is configured into entrylo0 820 * and entrylo1 to cover the contiguous huge PTE 821 * address space. 822 */ 823 small_sequence = (HPAGE_SIZE >> 7) < 0x10000; 824 825 /* We can clobber tmp. It isn't used after this.*/ 826 if (!small_sequence) 827 uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); 828 829 build_convert_pte_to_entrylo(p, pte); 830 UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */ 831 /* convert to entrylo1 */ 832 if (small_sequence) 833 UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7); 834 else 835 UASM_i_ADDU(p, pte, pte, tmp); 836 837 UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */ 838 } 839 840 static __cpuinit void build_huge_handler_tail(u32 **p, 841 struct uasm_reloc **r, 842 struct uasm_label **l, 843 unsigned int pte, 844 unsigned int ptr) 845 { 846 #ifdef CONFIG_SMP 847 UASM_i_SC(p, pte, 0, ptr); 848 uasm_il_beqz(p, r, pte, label_tlb_huge_update); 849 UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */ 850 #else 851 UASM_i_SW(p, pte, 0, ptr); 852 #endif 853 build_huge_update_entries(p, pte, ptr); 854 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); 855 } 856 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 857 858 #ifdef CONFIG_64BIT 859 /* 860 * TMP and PTR are scratch. 861 * TMP will be clobbered, PTR will hold the pmd entry. 862 */ 863 static void __cpuinit 864 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 865 unsigned int tmp, unsigned int ptr) 866 { 867 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 868 long pgdc = (long)pgd_current; 869 #endif 870 /* 871 * The vmalloc handling is not in the hotpath. 872 */ 873 uasm_i_dmfc0(p, tmp, C0_BADVADDR); 874 875 if (check_for_high_segbits) { 876 /* 877 * The kernel currently implicitely assumes that the 878 * MIPS SEGBITS parameter for the processor is 879 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never 880 * allocate virtual addresses outside the maximum 881 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But 882 * that doesn't prevent user code from accessing the 883 * higher xuseg addresses. Here, we make sure that 884 * everything but the lower xuseg addresses goes down 885 * the module_alloc/vmalloc path. 886 */ 887 uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); 888 uasm_il_bnez(p, r, ptr, label_vmalloc); 889 } else { 890 uasm_il_bltz(p, r, tmp, label_vmalloc); 891 } 892 /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */ 893 894 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT 895 if (pgd_reg != -1) { 896 /* pgd is in pgd_reg */ 897 UASM_i_MFC0(p, ptr, 31, pgd_reg); 898 } else { 899 /* 900 * &pgd << 11 stored in CONTEXT [23..63]. 901 */ 902 UASM_i_MFC0(p, ptr, C0_CONTEXT); 903 904 /* Clear lower 23 bits of context. */ 905 uasm_i_dins(p, ptr, 0, 0, 23); 906 907 /* 1 0 1 0 1 << 6 xkphys cached */ 908 uasm_i_ori(p, ptr, ptr, 0x540); 909 uasm_i_drotr(p, ptr, ptr, 11); 910 } 911 #elif defined(CONFIG_SMP) 912 # ifdef CONFIG_MIPS_MT_SMTC 913 /* 914 * SMTC uses TCBind value as "CPU" index 915 */ 916 uasm_i_mfc0(p, ptr, C0_TCBIND); 917 uasm_i_dsrl_safe(p, ptr, ptr, 19); 918 # else 919 /* 920 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 921 * stored in CONTEXT. 922 */ 923 uasm_i_dmfc0(p, ptr, C0_CONTEXT); 924 uasm_i_dsrl_safe(p, ptr, ptr, 23); 925 # endif 926 UASM_i_LA_mostly(p, tmp, pgdc); 927 uasm_i_daddu(p, ptr, ptr, tmp); 928 uasm_i_dmfc0(p, tmp, C0_BADVADDR); 929 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); 930 #else 931 UASM_i_LA_mostly(p, ptr, pgdc); 932 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); 933 #endif 934 935 uasm_l_vmalloc_done(l, *p); 936 937 /* get pgd offset in bytes */ 938 uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3); 939 940 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); 941 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */ 942 #ifndef __PAGETABLE_PMD_FOLDED 943 uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 944 uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */ 945 uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */ 946 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3); 947 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ 948 #endif 949 } 950 951 /* 952 * BVADDR is the faulting address, PTR is scratch. 953 * PTR will hold the pgd for vmalloc. 954 */ 955 static void __cpuinit 956 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 957 unsigned int bvaddr, unsigned int ptr, 958 enum vmalloc64_mode mode) 959 { 960 long swpd = (long)swapper_pg_dir; 961 int single_insn_swpd; 962 int did_vmalloc_branch = 0; 963 964 single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd); 965 966 uasm_l_vmalloc(l, *p); 967 968 if (mode != not_refill && check_for_high_segbits) { 969 if (single_insn_swpd) { 970 uasm_il_bltz(p, r, bvaddr, label_vmalloc_done); 971 uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); 972 did_vmalloc_branch = 1; 973 /* fall through */ 974 } else { 975 uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault); 976 } 977 } 978 if (!did_vmalloc_branch) { 979 if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) { 980 uasm_il_b(p, r, label_vmalloc_done); 981 uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); 982 } else { 983 UASM_i_LA_mostly(p, ptr, swpd); 984 uasm_il_b(p, r, label_vmalloc_done); 985 if (uasm_in_compat_space_p(swpd)) 986 uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd)); 987 else 988 uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd)); 989 } 990 } 991 if (mode != not_refill && check_for_high_segbits) { 992 uasm_l_large_segbits_fault(l, *p); 993 /* 994 * We get here if we are an xsseg address, or if we are 995 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary. 996 * 997 * Ignoring xsseg (assume disabled so would generate 998 * (address errors?), the only remaining possibility 999 * is the upper xuseg addresses. On processors with 1000 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these 1001 * addresses would have taken an address error. We try 1002 * to mimic that here by taking a load/istream page 1003 * fault. 1004 */ 1005 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); 1006 uasm_i_jr(p, ptr); 1007 1008 if (mode == refill_scratch) { 1009 if (scratch_reg > 0) 1010 UASM_i_MFC0(p, 1, 31, scratch_reg); 1011 else 1012 UASM_i_LW(p, 1, scratchpad_offset(0), 0); 1013 } else { 1014 uasm_i_nop(p); 1015 } 1016 } 1017 } 1018 1019 #else /* !CONFIG_64BIT */ 1020 1021 /* 1022 * TMP and PTR are scratch. 1023 * TMP will be clobbered, PTR will hold the pgd entry. 1024 */ 1025 static void __cpuinit __maybe_unused 1026 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) 1027 { 1028 long pgdc = (long)pgd_current; 1029 1030 /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ 1031 #ifdef CONFIG_SMP 1032 #ifdef CONFIG_MIPS_MT_SMTC 1033 /* 1034 * SMTC uses TCBind value as "CPU" index 1035 */ 1036 uasm_i_mfc0(p, ptr, C0_TCBIND); 1037 UASM_i_LA_mostly(p, tmp, pgdc); 1038 uasm_i_srl(p, ptr, ptr, 19); 1039 #else 1040 /* 1041 * smp_processor_id() << 3 is stored in CONTEXT. 1042 */ 1043 uasm_i_mfc0(p, ptr, C0_CONTEXT); 1044 UASM_i_LA_mostly(p, tmp, pgdc); 1045 uasm_i_srl(p, ptr, ptr, 23); 1046 #endif 1047 uasm_i_addu(p, ptr, tmp, ptr); 1048 #else 1049 UASM_i_LA_mostly(p, ptr, pgdc); 1050 #endif 1051 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 1052 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 1053 uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ 1054 uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); 1055 uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ 1056 } 1057 1058 #endif /* !CONFIG_64BIT */ 1059 1060 static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx) 1061 { 1062 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; 1063 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); 1064 1065 switch (current_cpu_type()) { 1066 case CPU_VR41XX: 1067 case CPU_VR4111: 1068 case CPU_VR4121: 1069 case CPU_VR4122: 1070 case CPU_VR4131: 1071 case CPU_VR4181: 1072 case CPU_VR4181A: 1073 case CPU_VR4133: 1074 shift += 2; 1075 break; 1076 1077 default: 1078 break; 1079 } 1080 1081 if (shift) 1082 UASM_i_SRL(p, ctx, ctx, shift); 1083 uasm_i_andi(p, ctx, ctx, mask); 1084 } 1085 1086 static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) 1087 { 1088 /* 1089 * Bug workaround for the Nevada. It seems as if under certain 1090 * circumstances the move from cp0_context might produce a 1091 * bogus result when the mfc0 instruction and its consumer are 1092 * in a different cacheline or a load instruction, probably any 1093 * memory reference, is between them. 1094 */ 1095 switch (current_cpu_type()) { 1096 case CPU_NEVADA: 1097 UASM_i_LW(p, ptr, 0, ptr); 1098 GET_CONTEXT(p, tmp); /* get context reg */ 1099 break; 1100 1101 default: 1102 GET_CONTEXT(p, tmp); /* get context reg */ 1103 UASM_i_LW(p, ptr, 0, ptr); 1104 break; 1105 } 1106 1107 build_adjust_context(p, tmp); 1108 UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ 1109 } 1110 1111 static void __cpuinit build_update_entries(u32 **p, unsigned int tmp, 1112 unsigned int ptep) 1113 { 1114 /* 1115 * 64bit address support (36bit on a 32bit CPU) in a 32bit 1116 * Kernel is a special case. Only a few CPUs use it. 1117 */ 1118 #ifdef CONFIG_64BIT_PHYS_ADDR 1119 if (cpu_has_64bits) { 1120 uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ 1121 uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 1122 if (cpu_has_rixi) { 1123 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1124 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1125 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); 1126 } else { 1127 uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ 1128 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1129 uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ 1130 } 1131 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1132 } else { 1133 int pte_off_even = sizeof(pte_t) / 2; 1134 int pte_off_odd = pte_off_even + sizeof(pte_t); 1135 1136 /* The pte entries are pre-shifted */ 1137 uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */ 1138 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1139 uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ 1140 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1141 } 1142 #else 1143 UASM_i_LW(p, tmp, 0, ptep); /* get even pte */ 1144 UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 1145 if (r45k_bvahwbug()) 1146 build_tlb_probe_entry(p); 1147 if (cpu_has_rixi) { 1148 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1149 if (r4k_250MHZhwbug()) 1150 UASM_i_MTC0(p, 0, C0_ENTRYLO0); 1151 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1152 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); 1153 } else { 1154 UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ 1155 if (r4k_250MHZhwbug()) 1156 UASM_i_MTC0(p, 0, C0_ENTRYLO0); 1157 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1158 UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ 1159 if (r45k_bvahwbug()) 1160 uasm_i_mfc0(p, tmp, C0_INDEX); 1161 } 1162 if (r4k_250MHZhwbug()) 1163 UASM_i_MTC0(p, 0, C0_ENTRYLO1); 1164 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1165 #endif 1166 } 1167 1168 struct mips_huge_tlb_info { 1169 int huge_pte; 1170 int restore_scratch; 1171 }; 1172 1173 static struct mips_huge_tlb_info __cpuinit 1174 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, 1175 struct uasm_reloc **r, unsigned int tmp, 1176 unsigned int ptr, int c0_scratch) 1177 { 1178 struct mips_huge_tlb_info rv; 1179 unsigned int even, odd; 1180 int vmalloc_branch_delay_filled = 0; 1181 const int scratch = 1; /* Our extra working register */ 1182 1183 rv.huge_pte = scratch; 1184 rv.restore_scratch = 0; 1185 1186 if (check_for_high_segbits) { 1187 UASM_i_MFC0(p, tmp, C0_BADVADDR); 1188 1189 if (pgd_reg != -1) 1190 UASM_i_MFC0(p, ptr, 31, pgd_reg); 1191 else 1192 UASM_i_MFC0(p, ptr, C0_CONTEXT); 1193 1194 if (c0_scratch >= 0) 1195 UASM_i_MTC0(p, scratch, 31, c0_scratch); 1196 else 1197 UASM_i_SW(p, scratch, scratchpad_offset(0), 0); 1198 1199 uasm_i_dsrl_safe(p, scratch, tmp, 1200 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); 1201 uasm_il_bnez(p, r, scratch, label_vmalloc); 1202 1203 if (pgd_reg == -1) { 1204 vmalloc_branch_delay_filled = 1; 1205 /* Clear lower 23 bits of context. */ 1206 uasm_i_dins(p, ptr, 0, 0, 23); 1207 } 1208 } else { 1209 if (pgd_reg != -1) 1210 UASM_i_MFC0(p, ptr, 31, pgd_reg); 1211 else 1212 UASM_i_MFC0(p, ptr, C0_CONTEXT); 1213 1214 UASM_i_MFC0(p, tmp, C0_BADVADDR); 1215 1216 if (c0_scratch >= 0) 1217 UASM_i_MTC0(p, scratch, 31, c0_scratch); 1218 else 1219 UASM_i_SW(p, scratch, scratchpad_offset(0), 0); 1220 1221 if (pgd_reg == -1) 1222 /* Clear lower 23 bits of context. */ 1223 uasm_i_dins(p, ptr, 0, 0, 23); 1224 1225 uasm_il_bltz(p, r, tmp, label_vmalloc); 1226 } 1227 1228 if (pgd_reg == -1) { 1229 vmalloc_branch_delay_filled = 1; 1230 /* 1 0 1 0 1 << 6 xkphys cached */ 1231 uasm_i_ori(p, ptr, ptr, 0x540); 1232 uasm_i_drotr(p, ptr, ptr, 11); 1233 } 1234 1235 #ifdef __PAGETABLE_PMD_FOLDED 1236 #define LOC_PTEP scratch 1237 #else 1238 #define LOC_PTEP ptr 1239 #endif 1240 1241 if (!vmalloc_branch_delay_filled) 1242 /* get pgd offset in bytes */ 1243 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); 1244 1245 uasm_l_vmalloc_done(l, *p); 1246 1247 /* 1248 * tmp ptr 1249 * fall-through case = badvaddr *pgd_current 1250 * vmalloc case = badvaddr swapper_pg_dir 1251 */ 1252 1253 if (vmalloc_branch_delay_filled) 1254 /* get pgd offset in bytes */ 1255 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); 1256 1257 #ifdef __PAGETABLE_PMD_FOLDED 1258 GET_CONTEXT(p, tmp); /* get context reg */ 1259 #endif 1260 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3); 1261 1262 if (use_lwx_insns()) { 1263 UASM_i_LWX(p, LOC_PTEP, scratch, ptr); 1264 } else { 1265 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */ 1266 uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */ 1267 } 1268 1269 #ifndef __PAGETABLE_PMD_FOLDED 1270 /* get pmd offset in bytes */ 1271 uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3); 1272 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3); 1273 GET_CONTEXT(p, tmp); /* get context reg */ 1274 1275 if (use_lwx_insns()) { 1276 UASM_i_LWX(p, scratch, scratch, ptr); 1277 } else { 1278 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */ 1279 UASM_i_LW(p, scratch, 0, ptr); 1280 } 1281 #endif 1282 /* Adjust the context during the load latency. */ 1283 build_adjust_context(p, tmp); 1284 1285 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1286 uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update); 1287 /* 1288 * The in the LWX case we don't want to do the load in the 1289 * delay slot. It cannot issue in the same cycle and may be 1290 * speculative and unneeded. 1291 */ 1292 if (use_lwx_insns()) 1293 uasm_i_nop(p); 1294 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 1295 1296 1297 /* build_update_entries */ 1298 if (use_lwx_insns()) { 1299 even = ptr; 1300 odd = tmp; 1301 UASM_i_LWX(p, even, scratch, tmp); 1302 UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t)); 1303 UASM_i_LWX(p, odd, scratch, tmp); 1304 } else { 1305 UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */ 1306 even = tmp; 1307 odd = ptr; 1308 UASM_i_LW(p, even, 0, ptr); /* get even pte */ 1309 UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */ 1310 } 1311 if (cpu_has_rixi) { 1312 uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL)); 1313 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ 1314 uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL)); 1315 } else { 1316 uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL)); 1317 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ 1318 uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL)); 1319 } 1320 UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ 1321 1322 if (c0_scratch >= 0) { 1323 UASM_i_MFC0(p, scratch, 31, c0_scratch); 1324 build_tlb_write_entry(p, l, r, tlb_random); 1325 uasm_l_leave(l, *p); 1326 rv.restore_scratch = 1; 1327 } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) { 1328 build_tlb_write_entry(p, l, r, tlb_random); 1329 uasm_l_leave(l, *p); 1330 UASM_i_LW(p, scratch, scratchpad_offset(0), 0); 1331 } else { 1332 UASM_i_LW(p, scratch, scratchpad_offset(0), 0); 1333 build_tlb_write_entry(p, l, r, tlb_random); 1334 uasm_l_leave(l, *p); 1335 rv.restore_scratch = 1; 1336 } 1337 1338 uasm_i_eret(p); /* return from trap */ 1339 1340 return rv; 1341 } 1342 1343 /* 1344 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception 1345 * because EXL == 0. If we wrap, we can also use the 32 instruction 1346 * slots before the XTLB refill exception handler which belong to the 1347 * unused TLB refill exception. 1348 */ 1349 #define MIPS64_REFILL_INSNS 32 1350 1351 static void __cpuinit build_r4000_tlb_refill_handler(void) 1352 { 1353 u32 *p = tlb_handler; 1354 struct uasm_label *l = labels; 1355 struct uasm_reloc *r = relocs; 1356 u32 *f; 1357 unsigned int final_len; 1358 struct mips_huge_tlb_info htlb_info __maybe_unused; 1359 enum vmalloc64_mode vmalloc_mode __maybe_unused; 1360 1361 memset(tlb_handler, 0, sizeof(tlb_handler)); 1362 memset(labels, 0, sizeof(labels)); 1363 memset(relocs, 0, sizeof(relocs)); 1364 memset(final_handler, 0, sizeof(final_handler)); 1365 1366 if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) { 1367 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, 1368 scratch_reg); 1369 vmalloc_mode = refill_scratch; 1370 } else { 1371 htlb_info.huge_pte = K0; 1372 htlb_info.restore_scratch = 0; 1373 vmalloc_mode = refill_noscratch; 1374 /* 1375 * create the plain linear handler 1376 */ 1377 if (bcm1250_m3_war()) { 1378 unsigned int segbits = 44; 1379 1380 uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1381 uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 1382 uasm_i_xor(&p, K0, K0, K1); 1383 uasm_i_dsrl_safe(&p, K1, K0, 62); 1384 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 1385 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 1386 uasm_i_or(&p, K0, K0, K1); 1387 uasm_il_bnez(&p, &r, K0, label_leave); 1388 /* No need for uasm_i_nop */ 1389 } 1390 1391 #ifdef CONFIG_64BIT 1392 build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ 1393 #else 1394 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ 1395 #endif 1396 1397 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1398 build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); 1399 #endif 1400 1401 build_get_ptep(&p, K0, K1); 1402 build_update_entries(&p, K0, K1); 1403 build_tlb_write_entry(&p, &l, &r, tlb_random); 1404 uasm_l_leave(&l, p); 1405 uasm_i_eret(&p); /* return from trap */ 1406 } 1407 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1408 uasm_l_tlb_huge_update(&l, p); 1409 build_huge_update_entries(&p, htlb_info.huge_pte, K1); 1410 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, 1411 htlb_info.restore_scratch); 1412 #endif 1413 1414 #ifdef CONFIG_64BIT 1415 build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); 1416 #endif 1417 1418 /* 1419 * Overflow check: For the 64bit handler, we need at least one 1420 * free instruction slot for the wrap-around branch. In worst 1421 * case, if the intended insertion point is a delay slot, we 1422 * need three, with the second nop'ed and the third being 1423 * unused. 1424 */ 1425 /* Loongson2 ebase is different than r4k, we have more space */ 1426 #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) 1427 if ((p - tlb_handler) > 64) 1428 panic("TLB refill handler space exceeded"); 1429 #else 1430 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) 1431 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) 1432 && uasm_insn_has_bdelay(relocs, 1433 tlb_handler + MIPS64_REFILL_INSNS - 3))) 1434 panic("TLB refill handler space exceeded"); 1435 #endif 1436 1437 /* 1438 * Now fold the handler in the TLB refill handler space. 1439 */ 1440 #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) 1441 f = final_handler; 1442 /* Simplest case, just copy the handler. */ 1443 uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1444 final_len = p - tlb_handler; 1445 #else /* CONFIG_64BIT */ 1446 f = final_handler + MIPS64_REFILL_INSNS; 1447 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { 1448 /* Just copy the handler. */ 1449 uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1450 final_len = p - tlb_handler; 1451 } else { 1452 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1453 const enum label_id ls = label_tlb_huge_update; 1454 #else 1455 const enum label_id ls = label_vmalloc; 1456 #endif 1457 u32 *split; 1458 int ov = 0; 1459 int i; 1460 1461 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) 1462 ; 1463 BUG_ON(i == ARRAY_SIZE(labels)); 1464 split = labels[i].addr; 1465 1466 /* 1467 * See if we have overflown one way or the other. 1468 */ 1469 if (split > tlb_handler + MIPS64_REFILL_INSNS || 1470 split < p - MIPS64_REFILL_INSNS) 1471 ov = 1; 1472 1473 if (ov) { 1474 /* 1475 * Split two instructions before the end. One 1476 * for the branch and one for the instruction 1477 * in the delay slot. 1478 */ 1479 split = tlb_handler + MIPS64_REFILL_INSNS - 2; 1480 1481 /* 1482 * If the branch would fall in a delay slot, 1483 * we must back up an additional instruction 1484 * so that it is no longer in a delay slot. 1485 */ 1486 if (uasm_insn_has_bdelay(relocs, split - 1)) 1487 split--; 1488 } 1489 /* Copy first part of the handler. */ 1490 uasm_copy_handler(relocs, labels, tlb_handler, split, f); 1491 f += split - tlb_handler; 1492 1493 if (ov) { 1494 /* Insert branch. */ 1495 uasm_l_split(&l, final_handler); 1496 uasm_il_b(&f, &r, label_split); 1497 if (uasm_insn_has_bdelay(relocs, split)) 1498 uasm_i_nop(&f); 1499 else { 1500 uasm_copy_handler(relocs, labels, 1501 split, split + 1, f); 1502 uasm_move_labels(labels, f, f + 1, -1); 1503 f++; 1504 split++; 1505 } 1506 } 1507 1508 /* Copy the rest of the handler. */ 1509 uasm_copy_handler(relocs, labels, split, p, final_handler); 1510 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + 1511 (p - split); 1512 } 1513 #endif /* CONFIG_64BIT */ 1514 1515 uasm_resolve_relocs(relocs, labels); 1516 pr_debug("Wrote TLB refill handler (%u instructions).\n", 1517 final_len); 1518 1519 memcpy((void *)ebase, final_handler, 0x100); 1520 1521 dump_handler("r4000_tlb_refill", (u32 *)ebase, 64); 1522 } 1523 1524 /* 1525 * 128 instructions for the fastpath handler is generous and should 1526 * never be exceeded. 1527 */ 1528 #define FASTPATH_SIZE 128 1529 1530 u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned; 1531 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; 1532 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; 1533 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT 1534 u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned; 1535 1536 static void __cpuinit build_r4000_setup_pgd(void) 1537 { 1538 const int a0 = 4; 1539 const int a1 = 5; 1540 u32 *p = tlbmiss_handler_setup_pgd_array; 1541 struct uasm_label *l = labels; 1542 struct uasm_reloc *r = relocs; 1543 1544 memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array)); 1545 memset(labels, 0, sizeof(labels)); 1546 memset(relocs, 0, sizeof(relocs)); 1547 1548 pgd_reg = allocate_kscratch(); 1549 1550 if (pgd_reg == -1) { 1551 /* PGD << 11 in c0_Context */ 1552 /* 1553 * If it is a ckseg0 address, convert to a physical 1554 * address. Shifting right by 29 and adding 4 will 1555 * result in zero for these addresses. 1556 * 1557 */ 1558 UASM_i_SRA(&p, a1, a0, 29); 1559 UASM_i_ADDIU(&p, a1, a1, 4); 1560 uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1); 1561 uasm_i_nop(&p); 1562 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); 1563 uasm_l_tlbl_goaround1(&l, p); 1564 UASM_i_SLL(&p, a0, a0, 11); 1565 uasm_i_jr(&p, 31); 1566 UASM_i_MTC0(&p, a0, C0_CONTEXT); 1567 } else { 1568 /* PGD in c0_KScratch */ 1569 uasm_i_jr(&p, 31); 1570 UASM_i_MTC0(&p, a0, 31, pgd_reg); 1571 } 1572 if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array)) 1573 panic("tlbmiss_handler_setup_pgd_array space exceeded"); 1574 uasm_resolve_relocs(relocs, labels); 1575 pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n", 1576 (unsigned int)(p - tlbmiss_handler_setup_pgd_array)); 1577 1578 dump_handler("tlbmiss_handler", 1579 tlbmiss_handler_setup_pgd_array, 1580 ARRAY_SIZE(tlbmiss_handler_setup_pgd_array)); 1581 } 1582 #endif 1583 1584 static void __cpuinit 1585 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) 1586 { 1587 #ifdef CONFIG_SMP 1588 # ifdef CONFIG_64BIT_PHYS_ADDR 1589 if (cpu_has_64bits) 1590 uasm_i_lld(p, pte, 0, ptr); 1591 else 1592 # endif 1593 UASM_i_LL(p, pte, 0, ptr); 1594 #else 1595 # ifdef CONFIG_64BIT_PHYS_ADDR 1596 if (cpu_has_64bits) 1597 uasm_i_ld(p, pte, 0, ptr); 1598 else 1599 # endif 1600 UASM_i_LW(p, pte, 0, ptr); 1601 #endif 1602 } 1603 1604 static void __cpuinit 1605 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, 1606 unsigned int mode) 1607 { 1608 #ifdef CONFIG_64BIT_PHYS_ADDR 1609 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); 1610 #endif 1611 1612 uasm_i_ori(p, pte, pte, mode); 1613 #ifdef CONFIG_SMP 1614 # ifdef CONFIG_64BIT_PHYS_ADDR 1615 if (cpu_has_64bits) 1616 uasm_i_scd(p, pte, 0, ptr); 1617 else 1618 # endif 1619 UASM_i_SC(p, pte, 0, ptr); 1620 1621 if (r10000_llsc_war()) 1622 uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); 1623 else 1624 uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1625 1626 # ifdef CONFIG_64BIT_PHYS_ADDR 1627 if (!cpu_has_64bits) { 1628 /* no uasm_i_nop needed */ 1629 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); 1630 uasm_i_ori(p, pte, pte, hwmode); 1631 uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); 1632 uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1633 /* no uasm_i_nop needed */ 1634 uasm_i_lw(p, pte, 0, ptr); 1635 } else 1636 uasm_i_nop(p); 1637 # else 1638 uasm_i_nop(p); 1639 # endif 1640 #else 1641 # ifdef CONFIG_64BIT_PHYS_ADDR 1642 if (cpu_has_64bits) 1643 uasm_i_sd(p, pte, 0, ptr); 1644 else 1645 # endif 1646 UASM_i_SW(p, pte, 0, ptr); 1647 1648 # ifdef CONFIG_64BIT_PHYS_ADDR 1649 if (!cpu_has_64bits) { 1650 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); 1651 uasm_i_ori(p, pte, pte, hwmode); 1652 uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr); 1653 uasm_i_lw(p, pte, 0, ptr); 1654 } 1655 # endif 1656 #endif 1657 } 1658 1659 /* 1660 * Check if PTE is present, if not then jump to LABEL. PTR points to 1661 * the page table where this PTE is located, PTE will be re-loaded 1662 * with it's original value. 1663 */ 1664 static void __cpuinit 1665 build_pte_present(u32 **p, struct uasm_reloc **r, 1666 int pte, int ptr, int scratch, enum label_id lid) 1667 { 1668 int t = scratch >= 0 ? scratch : pte; 1669 1670 if (cpu_has_rixi) { 1671 if (use_bbit_insns()) { 1672 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); 1673 uasm_i_nop(p); 1674 } else { 1675 uasm_i_andi(p, t, pte, _PAGE_PRESENT); 1676 uasm_il_beqz(p, r, t, lid); 1677 if (pte == t) 1678 /* You lose the SMP race :-(*/ 1679 iPTE_LW(p, pte, ptr); 1680 } 1681 } else { 1682 uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ); 1683 uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ); 1684 uasm_il_bnez(p, r, t, lid); 1685 if (pte == t) 1686 /* You lose the SMP race :-(*/ 1687 iPTE_LW(p, pte, ptr); 1688 } 1689 } 1690 1691 /* Make PTE valid, store result in PTR. */ 1692 static void __cpuinit 1693 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, 1694 unsigned int ptr) 1695 { 1696 unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED; 1697 1698 iPTE_SW(p, r, pte, ptr, mode); 1699 } 1700 1701 /* 1702 * Check if PTE can be written to, if not branch to LABEL. Regardless 1703 * restore PTE with value from PTR when done. 1704 */ 1705 static void __cpuinit 1706 build_pte_writable(u32 **p, struct uasm_reloc **r, 1707 unsigned int pte, unsigned int ptr, int scratch, 1708 enum label_id lid) 1709 { 1710 int t = scratch >= 0 ? scratch : pte; 1711 1712 uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE); 1713 uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE); 1714 uasm_il_bnez(p, r, t, lid); 1715 if (pte == t) 1716 /* You lose the SMP race :-(*/ 1717 iPTE_LW(p, pte, ptr); 1718 else 1719 uasm_i_nop(p); 1720 } 1721 1722 /* Make PTE writable, update software status bits as well, then store 1723 * at PTR. 1724 */ 1725 static void __cpuinit 1726 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, 1727 unsigned int ptr) 1728 { 1729 unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID 1730 | _PAGE_DIRTY); 1731 1732 iPTE_SW(p, r, pte, ptr, mode); 1733 } 1734 1735 /* 1736 * Check if PTE can be modified, if not branch to LABEL. Regardless 1737 * restore PTE with value from PTR when done. 1738 */ 1739 static void __cpuinit 1740 build_pte_modifiable(u32 **p, struct uasm_reloc **r, 1741 unsigned int pte, unsigned int ptr, int scratch, 1742 enum label_id lid) 1743 { 1744 if (use_bbit_insns()) { 1745 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid); 1746 uasm_i_nop(p); 1747 } else { 1748 int t = scratch >= 0 ? scratch : pte; 1749 uasm_i_andi(p, t, pte, _PAGE_WRITE); 1750 uasm_il_beqz(p, r, t, lid); 1751 if (pte == t) 1752 /* You lose the SMP race :-(*/ 1753 iPTE_LW(p, pte, ptr); 1754 } 1755 } 1756 1757 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 1758 1759 1760 /* 1761 * R3000 style TLB load/store/modify handlers. 1762 */ 1763 1764 /* 1765 * This places the pte into ENTRYLO0 and writes it with tlbwi. 1766 * Then it returns. 1767 */ 1768 static void __cpuinit 1769 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) 1770 { 1771 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1772 uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ 1773 uasm_i_tlbwi(p); 1774 uasm_i_jr(p, tmp); 1775 uasm_i_rfe(p); /* branch delay */ 1776 } 1777 1778 /* 1779 * This places the pte into ENTRYLO0 and writes it with tlbwi 1780 * or tlbwr as appropriate. This is because the index register 1781 * may have the probe fail bit set as a result of a trap on a 1782 * kseg2 access, i.e. without refill. Then it returns. 1783 */ 1784 static void __cpuinit 1785 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, 1786 struct uasm_reloc **r, unsigned int pte, 1787 unsigned int tmp) 1788 { 1789 uasm_i_mfc0(p, tmp, C0_INDEX); 1790 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1791 uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ 1792 uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */ 1793 uasm_i_tlbwi(p); /* cp0 delay */ 1794 uasm_i_jr(p, tmp); 1795 uasm_i_rfe(p); /* branch delay */ 1796 uasm_l_r3000_write_probe_fail(l, *p); 1797 uasm_i_tlbwr(p); /* cp0 delay */ 1798 uasm_i_jr(p, tmp); 1799 uasm_i_rfe(p); /* branch delay */ 1800 } 1801 1802 static void __cpuinit 1803 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, 1804 unsigned int ptr) 1805 { 1806 long pgdc = (long)pgd_current; 1807 1808 uasm_i_mfc0(p, pte, C0_BADVADDR); 1809 uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */ 1810 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 1811 uasm_i_srl(p, pte, pte, 22); /* load delay */ 1812 uasm_i_sll(p, pte, pte, 2); 1813 uasm_i_addu(p, ptr, ptr, pte); 1814 uasm_i_mfc0(p, pte, C0_CONTEXT); 1815 uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */ 1816 uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ 1817 uasm_i_addu(p, ptr, ptr, pte); 1818 uasm_i_lw(p, pte, 0, ptr); 1819 uasm_i_tlbp(p); /* load delay */ 1820 } 1821 1822 static void __cpuinit build_r3000_tlb_load_handler(void) 1823 { 1824 u32 *p = handle_tlbl; 1825 struct uasm_label *l = labels; 1826 struct uasm_reloc *r = relocs; 1827 1828 memset(handle_tlbl, 0, sizeof(handle_tlbl)); 1829 memset(labels, 0, sizeof(labels)); 1830 memset(relocs, 0, sizeof(relocs)); 1831 1832 build_r3000_tlbchange_handler_head(&p, K0, K1); 1833 build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); 1834 uasm_i_nop(&p); /* load delay */ 1835 build_make_valid(&p, &r, K0, K1); 1836 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1837 1838 uasm_l_nopage_tlbl(&l, p); 1839 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 1840 uasm_i_nop(&p); 1841 1842 if ((p - handle_tlbl) > FASTPATH_SIZE) 1843 panic("TLB load handler fastpath space exceeded"); 1844 1845 uasm_resolve_relocs(relocs, labels); 1846 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", 1847 (unsigned int)(p - handle_tlbl)); 1848 1849 dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl)); 1850 } 1851 1852 static void __cpuinit build_r3000_tlb_store_handler(void) 1853 { 1854 u32 *p = handle_tlbs; 1855 struct uasm_label *l = labels; 1856 struct uasm_reloc *r = relocs; 1857 1858 memset(handle_tlbs, 0, sizeof(handle_tlbs)); 1859 memset(labels, 0, sizeof(labels)); 1860 memset(relocs, 0, sizeof(relocs)); 1861 1862 build_r3000_tlbchange_handler_head(&p, K0, K1); 1863 build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); 1864 uasm_i_nop(&p); /* load delay */ 1865 build_make_write(&p, &r, K0, K1); 1866 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1867 1868 uasm_l_nopage_tlbs(&l, p); 1869 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1870 uasm_i_nop(&p); 1871 1872 if ((p - handle_tlbs) > FASTPATH_SIZE) 1873 panic("TLB store handler fastpath space exceeded"); 1874 1875 uasm_resolve_relocs(relocs, labels); 1876 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", 1877 (unsigned int)(p - handle_tlbs)); 1878 1879 dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs)); 1880 } 1881 1882 static void __cpuinit build_r3000_tlb_modify_handler(void) 1883 { 1884 u32 *p = handle_tlbm; 1885 struct uasm_label *l = labels; 1886 struct uasm_reloc *r = relocs; 1887 1888 memset(handle_tlbm, 0, sizeof(handle_tlbm)); 1889 memset(labels, 0, sizeof(labels)); 1890 memset(relocs, 0, sizeof(relocs)); 1891 1892 build_r3000_tlbchange_handler_head(&p, K0, K1); 1893 build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); 1894 uasm_i_nop(&p); /* load delay */ 1895 build_make_write(&p, &r, K0, K1); 1896 build_r3000_pte_reload_tlbwi(&p, K0, K1); 1897 1898 uasm_l_nopage_tlbm(&l, p); 1899 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1900 uasm_i_nop(&p); 1901 1902 if ((p - handle_tlbm) > FASTPATH_SIZE) 1903 panic("TLB modify handler fastpath space exceeded"); 1904 1905 uasm_resolve_relocs(relocs, labels); 1906 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", 1907 (unsigned int)(p - handle_tlbm)); 1908 1909 dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm)); 1910 } 1911 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ 1912 1913 /* 1914 * R4000 style TLB load/store/modify handlers. 1915 */ 1916 static struct work_registers __cpuinit 1917 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, 1918 struct uasm_reloc **r) 1919 { 1920 struct work_registers wr = build_get_work_registers(p); 1921 1922 #ifdef CONFIG_64BIT 1923 build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */ 1924 #else 1925 build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */ 1926 #endif 1927 1928 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1929 /* 1930 * For huge tlb entries, pmd doesn't contain an address but 1931 * instead contains the tlb pte. Check the PAGE_HUGE bit and 1932 * see if we need to jump to huge tlb processing. 1933 */ 1934 build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update); 1935 #endif 1936 1937 UASM_i_MFC0(p, wr.r1, C0_BADVADDR); 1938 UASM_i_LW(p, wr.r2, 0, wr.r2); 1939 UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); 1940 uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); 1941 UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); 1942 1943 #ifdef CONFIG_SMP 1944 uasm_l_smp_pgtable_change(l, *p); 1945 #endif 1946 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ 1947 if (!m4kc_tlbp_war()) 1948 build_tlb_probe_entry(p); 1949 return wr; 1950 } 1951 1952 static void __cpuinit 1953 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, 1954 struct uasm_reloc **r, unsigned int tmp, 1955 unsigned int ptr) 1956 { 1957 uasm_i_ori(p, ptr, ptr, sizeof(pte_t)); 1958 uasm_i_xori(p, ptr, ptr, sizeof(pte_t)); 1959 build_update_entries(p, tmp, ptr); 1960 build_tlb_write_entry(p, l, r, tlb_indexed); 1961 uasm_l_leave(l, *p); 1962 build_restore_work_registers(p); 1963 uasm_i_eret(p); /* return from trap */ 1964 1965 #ifdef CONFIG_64BIT 1966 build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill); 1967 #endif 1968 } 1969 1970 static void __cpuinit build_r4000_tlb_load_handler(void) 1971 { 1972 u32 *p = handle_tlbl; 1973 struct uasm_label *l = labels; 1974 struct uasm_reloc *r = relocs; 1975 struct work_registers wr; 1976 1977 memset(handle_tlbl, 0, sizeof(handle_tlbl)); 1978 memset(labels, 0, sizeof(labels)); 1979 memset(relocs, 0, sizeof(relocs)); 1980 1981 if (bcm1250_m3_war()) { 1982 unsigned int segbits = 44; 1983 1984 uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1985 uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 1986 uasm_i_xor(&p, K0, K0, K1); 1987 uasm_i_dsrl_safe(&p, K1, K0, 62); 1988 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 1989 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 1990 uasm_i_or(&p, K0, K0, K1); 1991 uasm_il_bnez(&p, &r, K0, label_leave); 1992 /* No need for uasm_i_nop */ 1993 } 1994 1995 wr = build_r4000_tlbchange_handler_head(&p, &l, &r); 1996 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); 1997 if (m4kc_tlbp_war()) 1998 build_tlb_probe_entry(&p); 1999 2000 if (cpu_has_rixi) { 2001 /* 2002 * If the page is not _PAGE_VALID, RI or XI could not 2003 * have triggered it. Skip the expensive test.. 2004 */ 2005 if (use_bbit_insns()) { 2006 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), 2007 label_tlbl_goaround1); 2008 } else { 2009 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); 2010 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1); 2011 } 2012 uasm_i_nop(&p); 2013 2014 uasm_i_tlbr(&p); 2015 /* Examine entrylo 0 or 1 based on ptr. */ 2016 if (use_bbit_insns()) { 2017 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); 2018 } else { 2019 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); 2020 uasm_i_beqz(&p, wr.r3, 8); 2021 } 2022 /* load it in the delay slot*/ 2023 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); 2024 /* load it if ptr is odd */ 2025 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); 2026 /* 2027 * If the entryLo (now in wr.r3) is valid (bit 1), RI or 2028 * XI must have triggered it. 2029 */ 2030 if (use_bbit_insns()) { 2031 uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl); 2032 uasm_i_nop(&p); 2033 uasm_l_tlbl_goaround1(&l, p); 2034 } else { 2035 uasm_i_andi(&p, wr.r3, wr.r3, 2); 2036 uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl); 2037 uasm_i_nop(&p); 2038 } 2039 uasm_l_tlbl_goaround1(&l, p); 2040 } 2041 build_make_valid(&p, &r, wr.r1, wr.r2); 2042 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); 2043 2044 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 2045 /* 2046 * This is the entry point when build_r4000_tlbchange_handler_head 2047 * spots a huge page. 2048 */ 2049 uasm_l_tlb_huge_update(&l, p); 2050 iPTE_LW(&p, wr.r1, wr.r2); 2051 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); 2052 build_tlb_probe_entry(&p); 2053 2054 if (cpu_has_rixi) { 2055 /* 2056 * If the page is not _PAGE_VALID, RI or XI could not 2057 * have triggered it. Skip the expensive test.. 2058 */ 2059 if (use_bbit_insns()) { 2060 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), 2061 label_tlbl_goaround2); 2062 } else { 2063 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); 2064 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); 2065 } 2066 uasm_i_nop(&p); 2067 2068 uasm_i_tlbr(&p); 2069 /* Examine entrylo 0 or 1 based on ptr. */ 2070 if (use_bbit_insns()) { 2071 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); 2072 } else { 2073 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); 2074 uasm_i_beqz(&p, wr.r3, 8); 2075 } 2076 /* load it in the delay slot*/ 2077 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); 2078 /* load it if ptr is odd */ 2079 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); 2080 /* 2081 * If the entryLo (now in wr.r3) is valid (bit 1), RI or 2082 * XI must have triggered it. 2083 */ 2084 if (use_bbit_insns()) { 2085 uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2); 2086 } else { 2087 uasm_i_andi(&p, wr.r3, wr.r3, 2); 2088 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); 2089 } 2090 if (PM_DEFAULT_MASK == 0) 2091 uasm_i_nop(&p); 2092 /* 2093 * We clobbered C0_PAGEMASK, restore it. On the other branch 2094 * it is restored in build_huge_tlb_write_entry. 2095 */ 2096 build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0); 2097 2098 uasm_l_tlbl_goaround2(&l, p); 2099 } 2100 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); 2101 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2102 #endif 2103 2104 uasm_l_nopage_tlbl(&l, p); 2105 build_restore_work_registers(&p); 2106 #ifdef CONFIG_CPU_MICROMIPS 2107 if ((unsigned long)tlb_do_page_fault_0 & 1) { 2108 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0)); 2109 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0)); 2110 uasm_i_jr(&p, K0); 2111 } else 2112 #endif 2113 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 2114 uasm_i_nop(&p); 2115 2116 if ((p - handle_tlbl) > FASTPATH_SIZE) 2117 panic("TLB load handler fastpath space exceeded"); 2118 2119 uasm_resolve_relocs(relocs, labels); 2120 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", 2121 (unsigned int)(p - handle_tlbl)); 2122 2123 dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl)); 2124 } 2125 2126 static void __cpuinit build_r4000_tlb_store_handler(void) 2127 { 2128 u32 *p = handle_tlbs; 2129 struct uasm_label *l = labels; 2130 struct uasm_reloc *r = relocs; 2131 struct work_registers wr; 2132 2133 memset(handle_tlbs, 0, sizeof(handle_tlbs)); 2134 memset(labels, 0, sizeof(labels)); 2135 memset(relocs, 0, sizeof(relocs)); 2136 2137 wr = build_r4000_tlbchange_handler_head(&p, &l, &r); 2138 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); 2139 if (m4kc_tlbp_war()) 2140 build_tlb_probe_entry(&p); 2141 build_make_write(&p, &r, wr.r1, wr.r2); 2142 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); 2143 2144 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 2145 /* 2146 * This is the entry point when 2147 * build_r4000_tlbchange_handler_head spots a huge page. 2148 */ 2149 uasm_l_tlb_huge_update(&l, p); 2150 iPTE_LW(&p, wr.r1, wr.r2); 2151 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); 2152 build_tlb_probe_entry(&p); 2153 uasm_i_ori(&p, wr.r1, wr.r1, 2154 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2155 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2156 #endif 2157 2158 uasm_l_nopage_tlbs(&l, p); 2159 build_restore_work_registers(&p); 2160 #ifdef CONFIG_CPU_MICROMIPS 2161 if ((unsigned long)tlb_do_page_fault_1 & 1) { 2162 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2163 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2164 uasm_i_jr(&p, K0); 2165 } else 2166 #endif 2167 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 2168 uasm_i_nop(&p); 2169 2170 if ((p - handle_tlbs) > FASTPATH_SIZE) 2171 panic("TLB store handler fastpath space exceeded"); 2172 2173 uasm_resolve_relocs(relocs, labels); 2174 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", 2175 (unsigned int)(p - handle_tlbs)); 2176 2177 dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs)); 2178 } 2179 2180 static void __cpuinit build_r4000_tlb_modify_handler(void) 2181 { 2182 u32 *p = handle_tlbm; 2183 struct uasm_label *l = labels; 2184 struct uasm_reloc *r = relocs; 2185 struct work_registers wr; 2186 2187 memset(handle_tlbm, 0, sizeof(handle_tlbm)); 2188 memset(labels, 0, sizeof(labels)); 2189 memset(relocs, 0, sizeof(relocs)); 2190 2191 wr = build_r4000_tlbchange_handler_head(&p, &l, &r); 2192 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); 2193 if (m4kc_tlbp_war()) 2194 build_tlb_probe_entry(&p); 2195 /* Present and writable bits set, set accessed and dirty bits. */ 2196 build_make_write(&p, &r, wr.r1, wr.r2); 2197 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); 2198 2199 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 2200 /* 2201 * This is the entry point when 2202 * build_r4000_tlbchange_handler_head spots a huge page. 2203 */ 2204 uasm_l_tlb_huge_update(&l, p); 2205 iPTE_LW(&p, wr.r1, wr.r2); 2206 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); 2207 build_tlb_probe_entry(&p); 2208 uasm_i_ori(&p, wr.r1, wr.r1, 2209 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2210 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2211 #endif 2212 2213 uasm_l_nopage_tlbm(&l, p); 2214 build_restore_work_registers(&p); 2215 #ifdef CONFIG_CPU_MICROMIPS 2216 if ((unsigned long)tlb_do_page_fault_1 & 1) { 2217 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2218 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2219 uasm_i_jr(&p, K0); 2220 } else 2221 #endif 2222 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 2223 uasm_i_nop(&p); 2224 2225 if ((p - handle_tlbm) > FASTPATH_SIZE) 2226 panic("TLB modify handler fastpath space exceeded"); 2227 2228 uasm_resolve_relocs(relocs, labels); 2229 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", 2230 (unsigned int)(p - handle_tlbm)); 2231 2232 dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm)); 2233 } 2234 2235 void __cpuinit build_tlb_refill_handler(void) 2236 { 2237 /* 2238 * The refill handler is generated per-CPU, multi-node systems 2239 * may have local storage for it. The other handlers are only 2240 * needed once. 2241 */ 2242 static int run_once = 0; 2243 2244 output_pgtable_bits_defines(); 2245 2246 #ifdef CONFIG_64BIT 2247 check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); 2248 #endif 2249 2250 switch (current_cpu_type()) { 2251 case CPU_R2000: 2252 case CPU_R3000: 2253 case CPU_R3000A: 2254 case CPU_R3081E: 2255 case CPU_TX3912: 2256 case CPU_TX3922: 2257 case CPU_TX3927: 2258 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 2259 setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000); 2260 if (cpu_has_local_ebase) 2261 build_r3000_tlb_refill_handler(); 2262 if (!run_once) { 2263 if (!cpu_has_local_ebase) 2264 build_r3000_tlb_refill_handler(); 2265 build_r3000_tlb_load_handler(); 2266 build_r3000_tlb_store_handler(); 2267 build_r3000_tlb_modify_handler(); 2268 run_once++; 2269 } 2270 #else 2271 panic("No R3000 TLB refill handler"); 2272 #endif 2273 break; 2274 2275 case CPU_R6000: 2276 case CPU_R6000A: 2277 panic("No R6000 TLB refill handler yet"); 2278 break; 2279 2280 case CPU_R8000: 2281 panic("No R8000 TLB refill handler yet"); 2282 break; 2283 2284 default: 2285 #ifndef CONFIG_MIPS_MT_SMTC 2286 setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000); 2287 #else 2288 setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000); 2289 #endif 2290 if (!run_once) { 2291 scratch_reg = allocate_kscratch(); 2292 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT 2293 build_r4000_setup_pgd(); 2294 #endif 2295 build_r4000_tlb_load_handler(); 2296 build_r4000_tlb_store_handler(); 2297 build_r4000_tlb_modify_handler(); 2298 if (!cpu_has_local_ebase) 2299 build_r4000_tlb_refill_handler(); 2300 run_once++; 2301 } 2302 if (cpu_has_local_ebase) 2303 build_r4000_tlb_refill_handler(); 2304 } 2305 } 2306 2307 void __cpuinit flush_tlb_handlers(void) 2308 { 2309 local_flush_icache_range((unsigned long)handle_tlbl, 2310 (unsigned long)handle_tlbl + sizeof(handle_tlbl)); 2311 local_flush_icache_range((unsigned long)handle_tlbs, 2312 (unsigned long)handle_tlbs + sizeof(handle_tlbs)); 2313 local_flush_icache_range((unsigned long)handle_tlbm, 2314 (unsigned long)handle_tlbm + sizeof(handle_tlbm)); 2315 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT 2316 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array, 2317 (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm)); 2318 #endif 2319 } 2320