/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 * Copyright (C) 2011 MIPS Technologies, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/cache.h>

#include <asm/cacheflush.h>
#include <asm/cpu-type.h>
#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
#include <asm/setup.h>

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

struct work_registers {
	int r1;
	int r2;
	int r3;
};

struct tlb_reg_save {
	unsigned long a;
	unsigned long b;
} ____cacheline_aligned_in_smp;

static struct tlb_reg_save handler_reg_save[NR_CPUS];

static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

static int use_bbit_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		return 1;
	default:
		return 0;
	}
}

static int use_lwx_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		return 1;
	default:
		return 0;
	}
}
#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
static bool scratchpad_available(void)
{
	return true;
}
static int scratchpad_offset(int i)
{
	/*
	 * CVMSEG starts at address -32768 and extends for
	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
	 */
	i += 1; /* Kernel use starts at the top and works down. */
	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
}
#else
static bool scratchpad_available(void)
{
	return false;
}
static int scratchpad_offset(int i)
{
	BUG();
	/* Really unreachable, but evidently some GCC versions want this. */
	return 0;
}
#endif
/*
 * Found by experiment: At least some revisions of the 4kc throw under
 * some circumstances a machine check exception, triggered by invalid
 * values in the index register.
Delaying the tlbp instruction until 136 * after the next branch, plus adding an additional nop in front of 137 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows 138 * why; it's not an issue caused by the core RTL. 139 * 140 */ 141 static int m4kc_tlbp_war(void) 142 { 143 return (current_cpu_data.processor_id & 0xffff00) == 144 (PRID_COMP_MIPS | PRID_IMP_4KC); 145 } 146 147 /* Handle labels (which must be positive integers). */ 148 enum label_id { 149 label_second_part = 1, 150 label_leave, 151 label_vmalloc, 152 label_vmalloc_done, 153 label_tlbw_hazard_0, 154 label_split = label_tlbw_hazard_0 + 8, 155 label_tlbl_goaround1, 156 label_tlbl_goaround2, 157 label_nopage_tlbl, 158 label_nopage_tlbs, 159 label_nopage_tlbm, 160 label_smp_pgtable_change, 161 label_r3000_write_probe_fail, 162 label_large_segbits_fault, 163 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 164 label_tlb_huge_update, 165 #endif 166 }; 167 168 UASM_L_LA(_second_part) 169 UASM_L_LA(_leave) 170 UASM_L_LA(_vmalloc) 171 UASM_L_LA(_vmalloc_done) 172 /* _tlbw_hazard_x is handled differently. */ 173 UASM_L_LA(_split) 174 UASM_L_LA(_tlbl_goaround1) 175 UASM_L_LA(_tlbl_goaround2) 176 UASM_L_LA(_nopage_tlbl) 177 UASM_L_LA(_nopage_tlbs) 178 UASM_L_LA(_nopage_tlbm) 179 UASM_L_LA(_smp_pgtable_change) 180 UASM_L_LA(_r3000_write_probe_fail) 181 UASM_L_LA(_large_segbits_fault) 182 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 183 UASM_L_LA(_tlb_huge_update) 184 #endif 185 186 static int hazard_instance; 187 188 static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance) 189 { 190 switch (instance) { 191 case 0 ... 7: 192 uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance); 193 return; 194 default: 195 BUG(); 196 } 197 } 198 199 static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance) 200 { 201 switch (instance) { 202 case 0 ... 7: 203 uasm_build_label(l, *p, label_tlbw_hazard_0 + instance); 204 break; 205 default: 206 BUG(); 207 } 208 } 209 210 /* 211 * pgtable bits are assigned dynamically depending on processor feature 212 * and statically based on kernel configuration. This spits out the actual 213 * values the kernel is using. Required to make sense from disassembled 214 * TLB exception handlers. 215 */ 216 static void output_pgtable_bits_defines(void) 217 { 218 #define pr_define(fmt, ...) 
\ 219 pr_debug("#define " fmt, ##__VA_ARGS__) 220 221 pr_debug("#include <asm/asm.h>\n"); 222 pr_debug("#include <asm/regdef.h>\n"); 223 pr_debug("\n"); 224 225 pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT); 226 pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT); 227 pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT); 228 pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT); 229 pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT); 230 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 231 pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT); 232 pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT); 233 #endif 234 if (cpu_has_rixi) { 235 #ifdef _PAGE_NO_EXEC_SHIFT 236 pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); 237 #endif 238 #ifdef _PAGE_NO_READ_SHIFT 239 pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); 240 #endif 241 } 242 pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); 243 pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT); 244 pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT); 245 pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT); 246 pr_debug("\n"); 247 } 248 249 static inline void dump_handler(const char *symbol, const u32 *handler, int count) 250 { 251 int i; 252 253 pr_debug("LEAF(%s)\n", symbol); 254 255 pr_debug("\t.set push\n"); 256 pr_debug("\t.set noreorder\n"); 257 258 for (i = 0; i < count; i++) 259 pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]); 260 261 pr_debug("\t.set\tpop\n"); 262 263 pr_debug("\tEND(%s)\n", symbol); 264 } 265 266 /* The only general purpose registers allowed in TLB handlers. */ 267 #define K0 26 268 #define K1 27 269 270 /* Some CP0 registers */ 271 #define C0_INDEX 0, 0 272 #define C0_ENTRYLO0 2, 0 273 #define C0_TCBIND 2, 2 274 #define C0_ENTRYLO1 3, 0 275 #define C0_CONTEXT 4, 0 276 #define C0_PAGEMASK 5, 0 277 #define C0_BADVADDR 8, 0 278 #define C0_ENTRYHI 10, 0 279 #define C0_EPC 14, 0 280 #define C0_XCONTEXT 20, 0 281 282 #ifdef CONFIG_64BIT 283 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) 284 #else 285 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT) 286 #endif 287 288 /* The worst case length of the handler is around 18 instructions for 289 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs. 290 * Maximum space available is 32 instructions for R3000 and 64 291 * instructions for R4000. 292 * 293 * We deliberately chose a buffer size of 128, so we won't scribble 294 * over anything important on overflow before we panic. 
295 */ 296 static u32 tlb_handler[128]; 297 298 /* simply assume worst case size for labels and relocs */ 299 static struct uasm_label labels[128]; 300 static struct uasm_reloc relocs[128]; 301 302 static int check_for_high_segbits; 303 304 static unsigned int kscratch_used_mask; 305 306 static inline int __maybe_unused c0_kscratch(void) 307 { 308 switch (current_cpu_type()) { 309 case CPU_XLP: 310 case CPU_XLR: 311 return 22; 312 default: 313 return 31; 314 } 315 } 316 317 static int allocate_kscratch(void) 318 { 319 int r; 320 unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask; 321 322 r = ffs(a); 323 324 if (r == 0) 325 return -1; 326 327 r--; /* make it zero based */ 328 329 kscratch_used_mask |= (1 << r); 330 331 return r; 332 } 333 334 static int scratch_reg; 335 static int pgd_reg; 336 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; 337 338 static struct work_registers build_get_work_registers(u32 **p) 339 { 340 struct work_registers r; 341 342 if (scratch_reg >= 0) { 343 /* Save in CPU local C0_KScratch? */ 344 UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg); 345 r.r1 = K0; 346 r.r2 = K1; 347 r.r3 = 1; 348 return r; 349 } 350 351 if (num_possible_cpus() > 1) { 352 /* Get smp_processor_id */ 353 UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG); 354 UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT); 355 356 /* handler_reg_save index in K0 */ 357 UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); 358 359 UASM_i_LA(p, K1, (long)&handler_reg_save); 360 UASM_i_ADDU(p, K0, K0, K1); 361 } else { 362 UASM_i_LA(p, K0, (long)&handler_reg_save); 363 } 364 /* K0 now points to save area, save $1 and $2 */ 365 UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); 366 UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0); 367 368 r.r1 = K1; 369 r.r2 = 1; 370 r.r3 = 2; 371 return r; 372 } 373 374 static void build_restore_work_registers(u32 **p) 375 { 376 if (scratch_reg >= 0) { 377 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 378 return; 379 } 380 /* K0 already points to save area, restore $1 and $2 */ 381 UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); 382 UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); 383 } 384 385 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 386 387 /* 388 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current, 389 * we cannot do r3000 under these circumstances. 390 * 391 * Declare pgd_current here instead of including mmu_context.h to avoid type 392 * conflicts for tlbmiss_handler_setup_pgd 393 */ 394 extern unsigned long pgd_current[]; 395 396 /* 397 * The R3000 TLB handler is simple. 
398 */ 399 static void build_r3000_tlb_refill_handler(void) 400 { 401 long pgdc = (long)pgd_current; 402 u32 *p; 403 404 memset(tlb_handler, 0, sizeof(tlb_handler)); 405 p = tlb_handler; 406 407 uasm_i_mfc0(&p, K0, C0_BADVADDR); 408 uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ 409 uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); 410 uasm_i_srl(&p, K0, K0, 22); /* load delay */ 411 uasm_i_sll(&p, K0, K0, 2); 412 uasm_i_addu(&p, K1, K1, K0); 413 uasm_i_mfc0(&p, K0, C0_CONTEXT); 414 uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ 415 uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ 416 uasm_i_addu(&p, K1, K1, K0); 417 uasm_i_lw(&p, K0, 0, K1); 418 uasm_i_nop(&p); /* load delay */ 419 uasm_i_mtc0(&p, K0, C0_ENTRYLO0); 420 uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ 421 uasm_i_tlbwr(&p); /* cp0 delay */ 422 uasm_i_jr(&p, K1); 423 uasm_i_rfe(&p); /* branch delay */ 424 425 if (p > tlb_handler + 32) 426 panic("TLB refill handler space exceeded"); 427 428 pr_debug("Wrote TLB refill handler (%u instructions).\n", 429 (unsigned int)(p - tlb_handler)); 430 431 memcpy((void *)ebase, tlb_handler, 0x80); 432 local_flush_icache_range(ebase, ebase + 0x80); 433 434 dump_handler("r3000_tlb_refill", (u32 *)ebase, 32); 435 } 436 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ 437 438 /* 439 * The R4000 TLB handler is much more complicated. We have two 440 * consecutive handler areas with 32 instructions space each. 441 * Since they aren't used at the same time, we can overflow in the 442 * other one.To keep things simple, we first assume linear space, 443 * then we relocate it to the final handler layout as needed. 444 */ 445 static u32 final_handler[64]; 446 447 /* 448 * Hazards 449 * 450 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0: 451 * 2. A timing hazard exists for the TLBP instruction. 452 * 453 * stalling_instruction 454 * TLBP 455 * 456 * The JTLB is being read for the TLBP throughout the stall generated by the 457 * previous instruction. This is not really correct as the stalling instruction 458 * can modify the address used to access the JTLB. The failure symptom is that 459 * the TLBP instruction will use an address created for the stalling instruction 460 * and not the address held in C0_ENHI and thus report the wrong results. 461 * 462 * The software work-around is to not allow the instruction preceding the TLBP 463 * to stall - make it an NOP or some other instruction guaranteed not to stall. 464 * 465 * Errata 2 will not be fixed. This errata is also on the R5000. 466 * 467 * As if we MIPS hackers wouldn't know how to nop pipelines happy ... 468 */ 469 static void __maybe_unused build_tlb_probe_entry(u32 **p) 470 { 471 switch (current_cpu_type()) { 472 /* Found by experiment: R4600 v2.0/R4700 needs this, too. */ 473 case CPU_R4600: 474 case CPU_R4700: 475 case CPU_R5000: 476 case CPU_NEVADA: 477 uasm_i_nop(p); 478 uasm_i_tlbp(p); 479 break; 480 481 default: 482 uasm_i_tlbp(p); 483 break; 484 } 485 } 486 487 /* 488 * Write random or indexed TLB entry, and care about the hazards from 489 * the preceding mtc0 and for the following eret. 
490 */ 491 enum tlb_write_entry { tlb_random, tlb_indexed }; 492 493 static void build_tlb_write_entry(u32 **p, struct uasm_label **l, 494 struct uasm_reloc **r, 495 enum tlb_write_entry wmode) 496 { 497 void(*tlbw)(u32 **) = NULL; 498 499 switch (wmode) { 500 case tlb_random: tlbw = uasm_i_tlbwr; break; 501 case tlb_indexed: tlbw = uasm_i_tlbwi; break; 502 } 503 504 if (cpu_has_mips_r2) { 505 /* 506 * The architecture spec says an ehb is required here, 507 * but a number of cores do not have the hazard and 508 * using an ehb causes an expensive pipeline stall. 509 */ 510 switch (current_cpu_type()) { 511 case CPU_M14KC: 512 case CPU_74K: 513 case CPU_1074K: 514 case CPU_PROAPTIV: 515 case CPU_P5600: 516 case CPU_M5150: 517 break; 518 519 default: 520 uasm_i_ehb(p); 521 break; 522 } 523 tlbw(p); 524 return; 525 } 526 527 switch (current_cpu_type()) { 528 case CPU_R4000PC: 529 case CPU_R4000SC: 530 case CPU_R4000MC: 531 case CPU_R4400PC: 532 case CPU_R4400SC: 533 case CPU_R4400MC: 534 /* 535 * This branch uses up a mtc0 hazard nop slot and saves 536 * two nops after the tlbw instruction. 537 */ 538 uasm_bgezl_hazard(p, r, hazard_instance); 539 tlbw(p); 540 uasm_bgezl_label(l, p, hazard_instance); 541 hazard_instance++; 542 uasm_i_nop(p); 543 break; 544 545 case CPU_R4600: 546 case CPU_R4700: 547 uasm_i_nop(p); 548 tlbw(p); 549 uasm_i_nop(p); 550 break; 551 552 case CPU_R5000: 553 case CPU_NEVADA: 554 uasm_i_nop(p); /* QED specifies 2 nops hazard */ 555 uasm_i_nop(p); /* QED specifies 2 nops hazard */ 556 tlbw(p); 557 break; 558 559 case CPU_R4300: 560 case CPU_5KC: 561 case CPU_TX49XX: 562 case CPU_PR4450: 563 case CPU_XLR: 564 uasm_i_nop(p); 565 tlbw(p); 566 break; 567 568 case CPU_R10000: 569 case CPU_R12000: 570 case CPU_R14000: 571 case CPU_4KC: 572 case CPU_4KEC: 573 case CPU_M14KC: 574 case CPU_M14KEC: 575 case CPU_SB1: 576 case CPU_SB1A: 577 case CPU_4KSC: 578 case CPU_20KC: 579 case CPU_25KF: 580 case CPU_BMIPS32: 581 case CPU_BMIPS3300: 582 case CPU_BMIPS4350: 583 case CPU_BMIPS4380: 584 case CPU_BMIPS5000: 585 case CPU_LOONGSON2: 586 case CPU_LOONGSON3: 587 case CPU_R5500: 588 if (m4kc_tlbp_war()) 589 uasm_i_nop(p); 590 case CPU_ALCHEMY: 591 tlbw(p); 592 break; 593 594 case CPU_RM7000: 595 uasm_i_nop(p); 596 uasm_i_nop(p); 597 uasm_i_nop(p); 598 uasm_i_nop(p); 599 tlbw(p); 600 break; 601 602 case CPU_VR4111: 603 case CPU_VR4121: 604 case CPU_VR4122: 605 case CPU_VR4181: 606 case CPU_VR4181A: 607 uasm_i_nop(p); 608 uasm_i_nop(p); 609 tlbw(p); 610 uasm_i_nop(p); 611 uasm_i_nop(p); 612 break; 613 614 case CPU_VR4131: 615 case CPU_VR4133: 616 case CPU_R5432: 617 uasm_i_nop(p); 618 uasm_i_nop(p); 619 tlbw(p); 620 break; 621 622 case CPU_JZRISC: 623 tlbw(p); 624 uasm_i_nop(p); 625 break; 626 627 default: 628 panic("No TLB refill handler yet (CPU type: %d)", 629 current_cpu_type()); 630 break; 631 } 632 } 633 634 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, 635 unsigned int reg) 636 { 637 if (cpu_has_rixi) { 638 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); 639 } else { 640 #ifdef CONFIG_64BIT_PHYS_ADDR 641 uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL)); 642 #else 643 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL)); 644 #endif 645 } 646 } 647 648 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 649 650 static void build_restore_pagemask(u32 **p, struct uasm_reloc **r, 651 unsigned int tmp, enum label_id lid, 652 int restore_scratch) 653 { 654 if (restore_scratch) { 655 /* Reset default page size */ 656 if (PM_DEFAULT_MASK >> 16) { 657 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); 
658 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); 659 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 660 uasm_il_b(p, r, lid); 661 } else if (PM_DEFAULT_MASK) { 662 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); 663 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 664 uasm_il_b(p, r, lid); 665 } else { 666 uasm_i_mtc0(p, 0, C0_PAGEMASK); 667 uasm_il_b(p, r, lid); 668 } 669 if (scratch_reg >= 0) 670 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 671 else 672 UASM_i_LW(p, 1, scratchpad_offset(0), 0); 673 } else { 674 /* Reset default page size */ 675 if (PM_DEFAULT_MASK >> 16) { 676 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); 677 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); 678 uasm_il_b(p, r, lid); 679 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 680 } else if (PM_DEFAULT_MASK) { 681 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); 682 uasm_il_b(p, r, lid); 683 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 684 } else { 685 uasm_il_b(p, r, lid); 686 uasm_i_mtc0(p, 0, C0_PAGEMASK); 687 } 688 } 689 } 690 691 static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l, 692 struct uasm_reloc **r, 693 unsigned int tmp, 694 enum tlb_write_entry wmode, 695 int restore_scratch) 696 { 697 /* Set huge page tlb entry size */ 698 uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); 699 uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); 700 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 701 702 build_tlb_write_entry(p, l, r, wmode); 703 704 build_restore_pagemask(p, r, tmp, label_leave, restore_scratch); 705 } 706 707 /* 708 * Check if Huge PTE is present, if so then jump to LABEL. 709 */ 710 static void 711 build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, 712 unsigned int pmd, int lid) 713 { 714 UASM_i_LW(p, tmp, 0, pmd); 715 if (use_bbit_insns()) { 716 uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid); 717 } else { 718 uasm_i_andi(p, tmp, tmp, _PAGE_HUGE); 719 uasm_il_bnez(p, r, tmp, lid); 720 } 721 } 722 723 static void build_huge_update_entries(u32 **p, unsigned int pte, 724 unsigned int tmp) 725 { 726 int small_sequence; 727 728 /* 729 * A huge PTE describes an area the size of the 730 * configured huge page size. This is twice the 731 * of the large TLB entry size we intend to use. 732 * A TLB entry half the size of the configured 733 * huge page size is configured into entrylo0 734 * and entrylo1 to cover the contiguous huge PTE 735 * address space. 736 */ 737 small_sequence = (HPAGE_SIZE >> 7) < 0x10000; 738 739 /* We can clobber tmp. It isn't used after this.*/ 740 if (!small_sequence) 741 uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); 742 743 build_convert_pte_to_entrylo(p, pte); 744 UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */ 745 /* convert to entrylo1 */ 746 if (small_sequence) 747 UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7); 748 else 749 UASM_i_ADDU(p, pte, pte, tmp); 750 751 UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */ 752 } 753 754 static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, 755 struct uasm_label **l, 756 unsigned int pte, 757 unsigned int ptr) 758 { 759 #ifdef CONFIG_SMP 760 UASM_i_SC(p, pte, 0, ptr); 761 uasm_il_beqz(p, r, pte, label_tlb_huge_update); 762 UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */ 763 #else 764 UASM_i_SW(p, pte, 0, ptr); 765 #endif 766 build_huge_update_entries(p, pte, ptr); 767 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); 768 } 769 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 770 771 #ifdef CONFIG_64BIT 772 /* 773 * TMP and PTR are scratch. 774 * TMP will be clobbered, PTR will hold the pmd entry. 
775 */ 776 static void 777 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 778 unsigned int tmp, unsigned int ptr) 779 { 780 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 781 long pgdc = (long)pgd_current; 782 #endif 783 /* 784 * The vmalloc handling is not in the hotpath. 785 */ 786 uasm_i_dmfc0(p, tmp, C0_BADVADDR); 787 788 if (check_for_high_segbits) { 789 /* 790 * The kernel currently implicitely assumes that the 791 * MIPS SEGBITS parameter for the processor is 792 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never 793 * allocate virtual addresses outside the maximum 794 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But 795 * that doesn't prevent user code from accessing the 796 * higher xuseg addresses. Here, we make sure that 797 * everything but the lower xuseg addresses goes down 798 * the module_alloc/vmalloc path. 799 */ 800 uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); 801 uasm_il_bnez(p, r, ptr, label_vmalloc); 802 } else { 803 uasm_il_bltz(p, r, tmp, label_vmalloc); 804 } 805 /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */ 806 807 if (pgd_reg != -1) { 808 /* pgd is in pgd_reg */ 809 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); 810 } else { 811 #if defined(CONFIG_MIPS_PGD_C0_CONTEXT) 812 /* 813 * &pgd << 11 stored in CONTEXT [23..63]. 814 */ 815 UASM_i_MFC0(p, ptr, C0_CONTEXT); 816 817 /* Clear lower 23 bits of context. */ 818 uasm_i_dins(p, ptr, 0, 0, 23); 819 820 /* 1 0 1 0 1 << 6 xkphys cached */ 821 uasm_i_ori(p, ptr, ptr, 0x540); 822 uasm_i_drotr(p, ptr, ptr, 11); 823 #elif defined(CONFIG_SMP) 824 UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG); 825 uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT); 826 UASM_i_LA_mostly(p, tmp, pgdc); 827 uasm_i_daddu(p, ptr, ptr, tmp); 828 uasm_i_dmfc0(p, tmp, C0_BADVADDR); 829 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); 830 #else 831 UASM_i_LA_mostly(p, ptr, pgdc); 832 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); 833 #endif 834 } 835 836 uasm_l_vmalloc_done(l, *p); 837 838 /* get pgd offset in bytes */ 839 uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3); 840 841 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); 842 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */ 843 #ifndef __PAGETABLE_PMD_FOLDED 844 uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 845 uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */ 846 uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */ 847 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3); 848 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ 849 #endif 850 } 851 852 /* 853 * BVADDR is the faulting address, PTR is scratch. 854 * PTR will hold the pgd for vmalloc. 
855 */ 856 static void 857 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 858 unsigned int bvaddr, unsigned int ptr, 859 enum vmalloc64_mode mode) 860 { 861 long swpd = (long)swapper_pg_dir; 862 int single_insn_swpd; 863 int did_vmalloc_branch = 0; 864 865 single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd); 866 867 uasm_l_vmalloc(l, *p); 868 869 if (mode != not_refill && check_for_high_segbits) { 870 if (single_insn_swpd) { 871 uasm_il_bltz(p, r, bvaddr, label_vmalloc_done); 872 uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); 873 did_vmalloc_branch = 1; 874 /* fall through */ 875 } else { 876 uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault); 877 } 878 } 879 if (!did_vmalloc_branch) { 880 if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) { 881 uasm_il_b(p, r, label_vmalloc_done); 882 uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); 883 } else { 884 UASM_i_LA_mostly(p, ptr, swpd); 885 uasm_il_b(p, r, label_vmalloc_done); 886 if (uasm_in_compat_space_p(swpd)) 887 uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd)); 888 else 889 uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd)); 890 } 891 } 892 if (mode != not_refill && check_for_high_segbits) { 893 uasm_l_large_segbits_fault(l, *p); 894 /* 895 * We get here if we are an xsseg address, or if we are 896 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary. 897 * 898 * Ignoring xsseg (assume disabled so would generate 899 * (address errors?), the only remaining possibility 900 * is the upper xuseg addresses. On processors with 901 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these 902 * addresses would have taken an address error. We try 903 * to mimic that here by taking a load/istream page 904 * fault. 905 */ 906 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); 907 uasm_i_jr(p, ptr); 908 909 if (mode == refill_scratch) { 910 if (scratch_reg >= 0) 911 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 912 else 913 UASM_i_LW(p, 1, scratchpad_offset(0), 0); 914 } else { 915 uasm_i_nop(p); 916 } 917 } 918 } 919 920 #else /* !CONFIG_64BIT */ 921 922 /* 923 * TMP and PTR are scratch. 924 * TMP will be clobbered, PTR will hold the pgd entry. 925 */ 926 static void __maybe_unused 927 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) 928 { 929 if (pgd_reg != -1) { 930 /* pgd is in pgd_reg */ 931 uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg); 932 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 933 } else { 934 long pgdc = (long)pgd_current; 935 936 /* 32 bit SMP has smp_processor_id() stored in CONTEXT. 
*/ 937 #ifdef CONFIG_SMP 938 uasm_i_mfc0(p, ptr, SMP_CPUID_REG); 939 UASM_i_LA_mostly(p, tmp, pgdc); 940 uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT); 941 uasm_i_addu(p, ptr, tmp, ptr); 942 #else 943 UASM_i_LA_mostly(p, ptr, pgdc); 944 #endif 945 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 946 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 947 } 948 uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ 949 uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); 950 uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ 951 } 952 953 #endif /* !CONFIG_64BIT */ 954 955 static void build_adjust_context(u32 **p, unsigned int ctx) 956 { 957 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; 958 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); 959 960 switch (current_cpu_type()) { 961 case CPU_VR41XX: 962 case CPU_VR4111: 963 case CPU_VR4121: 964 case CPU_VR4122: 965 case CPU_VR4131: 966 case CPU_VR4181: 967 case CPU_VR4181A: 968 case CPU_VR4133: 969 shift += 2; 970 break; 971 972 default: 973 break; 974 } 975 976 if (shift) 977 UASM_i_SRL(p, ctx, ctx, shift); 978 uasm_i_andi(p, ctx, ctx, mask); 979 } 980 981 static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) 982 { 983 /* 984 * Bug workaround for the Nevada. It seems as if under certain 985 * circumstances the move from cp0_context might produce a 986 * bogus result when the mfc0 instruction and its consumer are 987 * in a different cacheline or a load instruction, probably any 988 * memory reference, is between them. 989 */ 990 switch (current_cpu_type()) { 991 case CPU_NEVADA: 992 UASM_i_LW(p, ptr, 0, ptr); 993 GET_CONTEXT(p, tmp); /* get context reg */ 994 break; 995 996 default: 997 GET_CONTEXT(p, tmp); /* get context reg */ 998 UASM_i_LW(p, ptr, 0, ptr); 999 break; 1000 } 1001 1002 build_adjust_context(p, tmp); 1003 UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ 1004 } 1005 1006 static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) 1007 { 1008 /* 1009 * 64bit address support (36bit on a 32bit CPU) in a 32bit 1010 * Kernel is a special case. Only a few CPUs use it. 
1011 */ 1012 #ifdef CONFIG_64BIT_PHYS_ADDR 1013 if (cpu_has_64bits) { 1014 uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ 1015 uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 1016 if (cpu_has_rixi) { 1017 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1018 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1019 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); 1020 } else { 1021 uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ 1022 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1023 uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ 1024 } 1025 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1026 } else { 1027 int pte_off_even = sizeof(pte_t) / 2; 1028 int pte_off_odd = pte_off_even + sizeof(pte_t); 1029 1030 /* The pte entries are pre-shifted */ 1031 uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */ 1032 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1033 uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ 1034 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1035 } 1036 #else 1037 UASM_i_LW(p, tmp, 0, ptep); /* get even pte */ 1038 UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 1039 if (r45k_bvahwbug()) 1040 build_tlb_probe_entry(p); 1041 if (cpu_has_rixi) { 1042 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1043 if (r4k_250MHZhwbug()) 1044 UASM_i_MTC0(p, 0, C0_ENTRYLO0); 1045 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1046 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); 1047 } else { 1048 UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ 1049 if (r4k_250MHZhwbug()) 1050 UASM_i_MTC0(p, 0, C0_ENTRYLO0); 1051 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1052 UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ 1053 if (r45k_bvahwbug()) 1054 uasm_i_mfc0(p, tmp, C0_INDEX); 1055 } 1056 if (r4k_250MHZhwbug()) 1057 UASM_i_MTC0(p, 0, C0_ENTRYLO1); 1058 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1059 #endif 1060 } 1061 1062 struct mips_huge_tlb_info { 1063 int huge_pte; 1064 int restore_scratch; 1065 bool need_reload_pte; 1066 }; 1067 1068 static struct mips_huge_tlb_info 1069 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, 1070 struct uasm_reloc **r, unsigned int tmp, 1071 unsigned int ptr, int c0_scratch_reg) 1072 { 1073 struct mips_huge_tlb_info rv; 1074 unsigned int even, odd; 1075 int vmalloc_branch_delay_filled = 0; 1076 const int scratch = 1; /* Our extra working register */ 1077 1078 rv.huge_pte = scratch; 1079 rv.restore_scratch = 0; 1080 rv.need_reload_pte = false; 1081 1082 if (check_for_high_segbits) { 1083 UASM_i_MFC0(p, tmp, C0_BADVADDR); 1084 1085 if (pgd_reg != -1) 1086 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); 1087 else 1088 UASM_i_MFC0(p, ptr, C0_CONTEXT); 1089 1090 if (c0_scratch_reg >= 0) 1091 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); 1092 else 1093 UASM_i_SW(p, scratch, scratchpad_offset(0), 0); 1094 1095 uasm_i_dsrl_safe(p, scratch, tmp, 1096 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); 1097 uasm_il_bnez(p, r, scratch, label_vmalloc); 1098 1099 if (pgd_reg == -1) { 1100 vmalloc_branch_delay_filled = 1; 1101 /* Clear lower 23 bits of context. 
*/ 1102 uasm_i_dins(p, ptr, 0, 0, 23); 1103 } 1104 } else { 1105 if (pgd_reg != -1) 1106 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); 1107 else 1108 UASM_i_MFC0(p, ptr, C0_CONTEXT); 1109 1110 UASM_i_MFC0(p, tmp, C0_BADVADDR); 1111 1112 if (c0_scratch_reg >= 0) 1113 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); 1114 else 1115 UASM_i_SW(p, scratch, scratchpad_offset(0), 0); 1116 1117 if (pgd_reg == -1) 1118 /* Clear lower 23 bits of context. */ 1119 uasm_i_dins(p, ptr, 0, 0, 23); 1120 1121 uasm_il_bltz(p, r, tmp, label_vmalloc); 1122 } 1123 1124 if (pgd_reg == -1) { 1125 vmalloc_branch_delay_filled = 1; 1126 /* 1 0 1 0 1 << 6 xkphys cached */ 1127 uasm_i_ori(p, ptr, ptr, 0x540); 1128 uasm_i_drotr(p, ptr, ptr, 11); 1129 } 1130 1131 #ifdef __PAGETABLE_PMD_FOLDED 1132 #define LOC_PTEP scratch 1133 #else 1134 #define LOC_PTEP ptr 1135 #endif 1136 1137 if (!vmalloc_branch_delay_filled) 1138 /* get pgd offset in bytes */ 1139 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); 1140 1141 uasm_l_vmalloc_done(l, *p); 1142 1143 /* 1144 * tmp ptr 1145 * fall-through case = badvaddr *pgd_current 1146 * vmalloc case = badvaddr swapper_pg_dir 1147 */ 1148 1149 if (vmalloc_branch_delay_filled) 1150 /* get pgd offset in bytes */ 1151 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); 1152 1153 #ifdef __PAGETABLE_PMD_FOLDED 1154 GET_CONTEXT(p, tmp); /* get context reg */ 1155 #endif 1156 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3); 1157 1158 if (use_lwx_insns()) { 1159 UASM_i_LWX(p, LOC_PTEP, scratch, ptr); 1160 } else { 1161 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */ 1162 uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */ 1163 } 1164 1165 #ifndef __PAGETABLE_PMD_FOLDED 1166 /* get pmd offset in bytes */ 1167 uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3); 1168 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3); 1169 GET_CONTEXT(p, tmp); /* get context reg */ 1170 1171 if (use_lwx_insns()) { 1172 UASM_i_LWX(p, scratch, scratch, ptr); 1173 } else { 1174 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */ 1175 UASM_i_LW(p, scratch, 0, ptr); 1176 } 1177 #endif 1178 /* Adjust the context during the load latency. */ 1179 build_adjust_context(p, tmp); 1180 1181 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1182 uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update); 1183 /* 1184 * The in the LWX case we don't want to do the load in the 1185 * delay slot. It cannot issue in the same cycle and may be 1186 * speculative and unneeded. 
1187 */ 1188 if (use_lwx_insns()) 1189 uasm_i_nop(p); 1190 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 1191 1192 1193 /* build_update_entries */ 1194 if (use_lwx_insns()) { 1195 even = ptr; 1196 odd = tmp; 1197 UASM_i_LWX(p, even, scratch, tmp); 1198 UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t)); 1199 UASM_i_LWX(p, odd, scratch, tmp); 1200 } else { 1201 UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */ 1202 even = tmp; 1203 odd = ptr; 1204 UASM_i_LW(p, even, 0, ptr); /* get even pte */ 1205 UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */ 1206 } 1207 if (cpu_has_rixi) { 1208 uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL)); 1209 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ 1210 uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL)); 1211 } else { 1212 uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL)); 1213 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ 1214 uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL)); 1215 } 1216 UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ 1217 1218 if (c0_scratch_reg >= 0) { 1219 UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg); 1220 build_tlb_write_entry(p, l, r, tlb_random); 1221 uasm_l_leave(l, *p); 1222 rv.restore_scratch = 1; 1223 } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) { 1224 build_tlb_write_entry(p, l, r, tlb_random); 1225 uasm_l_leave(l, *p); 1226 UASM_i_LW(p, scratch, scratchpad_offset(0), 0); 1227 } else { 1228 UASM_i_LW(p, scratch, scratchpad_offset(0), 0); 1229 build_tlb_write_entry(p, l, r, tlb_random); 1230 uasm_l_leave(l, *p); 1231 rv.restore_scratch = 1; 1232 } 1233 1234 uasm_i_eret(p); /* return from trap */ 1235 1236 return rv; 1237 } 1238 1239 /* 1240 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception 1241 * because EXL == 0. If we wrap, we can also use the 32 instruction 1242 * slots before the XTLB refill exception handler which belong to the 1243 * unused TLB refill exception. 
1244 */ 1245 #define MIPS64_REFILL_INSNS 32 1246 1247 static void build_r4000_tlb_refill_handler(void) 1248 { 1249 u32 *p = tlb_handler; 1250 struct uasm_label *l = labels; 1251 struct uasm_reloc *r = relocs; 1252 u32 *f; 1253 unsigned int final_len; 1254 struct mips_huge_tlb_info htlb_info __maybe_unused; 1255 enum vmalloc64_mode vmalloc_mode __maybe_unused; 1256 1257 memset(tlb_handler, 0, sizeof(tlb_handler)); 1258 memset(labels, 0, sizeof(labels)); 1259 memset(relocs, 0, sizeof(relocs)); 1260 memset(final_handler, 0, sizeof(final_handler)); 1261 1262 if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { 1263 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, 1264 scratch_reg); 1265 vmalloc_mode = refill_scratch; 1266 } else { 1267 htlb_info.huge_pte = K0; 1268 htlb_info.restore_scratch = 0; 1269 htlb_info.need_reload_pte = true; 1270 vmalloc_mode = refill_noscratch; 1271 /* 1272 * create the plain linear handler 1273 */ 1274 if (bcm1250_m3_war()) { 1275 unsigned int segbits = 44; 1276 1277 uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1278 uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 1279 uasm_i_xor(&p, K0, K0, K1); 1280 uasm_i_dsrl_safe(&p, K1, K0, 62); 1281 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 1282 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 1283 uasm_i_or(&p, K0, K0, K1); 1284 uasm_il_bnez(&p, &r, K0, label_leave); 1285 /* No need for uasm_i_nop */ 1286 } 1287 1288 #ifdef CONFIG_64BIT 1289 build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ 1290 #else 1291 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ 1292 #endif 1293 1294 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1295 build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); 1296 #endif 1297 1298 build_get_ptep(&p, K0, K1); 1299 build_update_entries(&p, K0, K1); 1300 build_tlb_write_entry(&p, &l, &r, tlb_random); 1301 uasm_l_leave(&l, p); 1302 uasm_i_eret(&p); /* return from trap */ 1303 } 1304 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1305 uasm_l_tlb_huge_update(&l, p); 1306 if (htlb_info.need_reload_pte) 1307 UASM_i_LW(&p, htlb_info.huge_pte, 0, K1); 1308 build_huge_update_entries(&p, htlb_info.huge_pte, K1); 1309 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, 1310 htlb_info.restore_scratch); 1311 #endif 1312 1313 #ifdef CONFIG_64BIT 1314 build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); 1315 #endif 1316 1317 /* 1318 * Overflow check: For the 64bit handler, we need at least one 1319 * free instruction slot for the wrap-around branch. In worst 1320 * case, if the intended insertion point is a delay slot, we 1321 * need three, with the second nop'ed and the third being 1322 * unused. 1323 */ 1324 switch (boot_cpu_type()) { 1325 default: 1326 if (sizeof(long) == 4) { 1327 case CPU_LOONGSON2: 1328 /* Loongson2 ebase is different than r4k, we have more space */ 1329 if ((p - tlb_handler) > 64) 1330 panic("TLB refill handler space exceeded"); 1331 /* 1332 * Now fold the handler in the TLB refill handler space. 1333 */ 1334 f = final_handler; 1335 /* Simplest case, just copy the handler. */ 1336 uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1337 final_len = p - tlb_handler; 1338 break; 1339 } else { 1340 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) 1341 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) 1342 && uasm_insn_has_bdelay(relocs, 1343 tlb_handler + MIPS64_REFILL_INSNS - 3))) 1344 panic("TLB refill handler space exceeded"); 1345 /* 1346 * Now fold the handler in the TLB refill handler space. 
1347 */ 1348 f = final_handler + MIPS64_REFILL_INSNS; 1349 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { 1350 /* Just copy the handler. */ 1351 uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1352 final_len = p - tlb_handler; 1353 } else { 1354 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1355 const enum label_id ls = label_tlb_huge_update; 1356 #else 1357 const enum label_id ls = label_vmalloc; 1358 #endif 1359 u32 *split; 1360 int ov = 0; 1361 int i; 1362 1363 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) 1364 ; 1365 BUG_ON(i == ARRAY_SIZE(labels)); 1366 split = labels[i].addr; 1367 1368 /* 1369 * See if we have overflown one way or the other. 1370 */ 1371 if (split > tlb_handler + MIPS64_REFILL_INSNS || 1372 split < p - MIPS64_REFILL_INSNS) 1373 ov = 1; 1374 1375 if (ov) { 1376 /* 1377 * Split two instructions before the end. One 1378 * for the branch and one for the instruction 1379 * in the delay slot. 1380 */ 1381 split = tlb_handler + MIPS64_REFILL_INSNS - 2; 1382 1383 /* 1384 * If the branch would fall in a delay slot, 1385 * we must back up an additional instruction 1386 * so that it is no longer in a delay slot. 1387 */ 1388 if (uasm_insn_has_bdelay(relocs, split - 1)) 1389 split--; 1390 } 1391 /* Copy first part of the handler. */ 1392 uasm_copy_handler(relocs, labels, tlb_handler, split, f); 1393 f += split - tlb_handler; 1394 1395 if (ov) { 1396 /* Insert branch. */ 1397 uasm_l_split(&l, final_handler); 1398 uasm_il_b(&f, &r, label_split); 1399 if (uasm_insn_has_bdelay(relocs, split)) 1400 uasm_i_nop(&f); 1401 else { 1402 uasm_copy_handler(relocs, labels, 1403 split, split + 1, f); 1404 uasm_move_labels(labels, f, f + 1, -1); 1405 f++; 1406 split++; 1407 } 1408 } 1409 1410 /* Copy the rest of the handler. */ 1411 uasm_copy_handler(relocs, labels, split, p, final_handler); 1412 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + 1413 (p - split); 1414 } 1415 } 1416 break; 1417 } 1418 1419 uasm_resolve_relocs(relocs, labels); 1420 pr_debug("Wrote TLB refill handler (%u instructions).\n", 1421 final_len); 1422 1423 memcpy((void *)ebase, final_handler, 0x100); 1424 local_flush_icache_range(ebase, ebase + 0x100); 1425 1426 dump_handler("r4000_tlb_refill", (u32 *)ebase, 64); 1427 } 1428 1429 extern u32 handle_tlbl[], handle_tlbl_end[]; 1430 extern u32 handle_tlbs[], handle_tlbs_end[]; 1431 extern u32 handle_tlbm[], handle_tlbm_end[]; 1432 extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[]; 1433 extern u32 tlbmiss_handler_setup_pgd_end[]; 1434 1435 static void build_setup_pgd(void) 1436 { 1437 const int a0 = 4; 1438 const int __maybe_unused a1 = 5; 1439 const int __maybe_unused a2 = 6; 1440 u32 *p = tlbmiss_handler_setup_pgd_start; 1441 const int tlbmiss_handler_setup_pgd_size = 1442 tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start; 1443 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 1444 long pgdc = (long)pgd_current; 1445 #endif 1446 1447 memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size * 1448 sizeof(tlbmiss_handler_setup_pgd[0])); 1449 memset(labels, 0, sizeof(labels)); 1450 memset(relocs, 0, sizeof(relocs)); 1451 pgd_reg = allocate_kscratch(); 1452 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT 1453 if (pgd_reg == -1) { 1454 struct uasm_label *l = labels; 1455 struct uasm_reloc *r = relocs; 1456 1457 /* PGD << 11 in c0_Context */ 1458 /* 1459 * If it is a ckseg0 address, convert to a physical 1460 * address. Shifting right by 29 and adding 4 will 1461 * result in zero for these addresses. 
1462 * 1463 */ 1464 UASM_i_SRA(&p, a1, a0, 29); 1465 UASM_i_ADDIU(&p, a1, a1, 4); 1466 uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1); 1467 uasm_i_nop(&p); 1468 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); 1469 uasm_l_tlbl_goaround1(&l, p); 1470 UASM_i_SLL(&p, a0, a0, 11); 1471 uasm_i_jr(&p, 31); 1472 UASM_i_MTC0(&p, a0, C0_CONTEXT); 1473 } else { 1474 /* PGD in c0_KScratch */ 1475 uasm_i_jr(&p, 31); 1476 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); 1477 } 1478 #else 1479 #ifdef CONFIG_SMP 1480 /* Save PGD to pgd_current[smp_processor_id()] */ 1481 UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG); 1482 UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT); 1483 UASM_i_LA_mostly(&p, a2, pgdc); 1484 UASM_i_ADDU(&p, a2, a2, a1); 1485 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); 1486 #else 1487 UASM_i_LA_mostly(&p, a2, pgdc); 1488 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); 1489 #endif /* SMP */ 1490 uasm_i_jr(&p, 31); 1491 1492 /* if pgd_reg is allocated, save PGD also to scratch register */ 1493 if (pgd_reg != -1) 1494 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); 1495 else 1496 uasm_i_nop(&p); 1497 #endif 1498 if (p >= tlbmiss_handler_setup_pgd_end) 1499 panic("tlbmiss_handler_setup_pgd space exceeded"); 1500 1501 uasm_resolve_relocs(relocs, labels); 1502 pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", 1503 (unsigned int)(p - tlbmiss_handler_setup_pgd)); 1504 1505 dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd, 1506 tlbmiss_handler_setup_pgd_size); 1507 } 1508 1509 static void 1510 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) 1511 { 1512 #ifdef CONFIG_SMP 1513 # ifdef CONFIG_64BIT_PHYS_ADDR 1514 if (cpu_has_64bits) 1515 uasm_i_lld(p, pte, 0, ptr); 1516 else 1517 # endif 1518 UASM_i_LL(p, pte, 0, ptr); 1519 #else 1520 # ifdef CONFIG_64BIT_PHYS_ADDR 1521 if (cpu_has_64bits) 1522 uasm_i_ld(p, pte, 0, ptr); 1523 else 1524 # endif 1525 UASM_i_LW(p, pte, 0, ptr); 1526 #endif 1527 } 1528 1529 static void 1530 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, 1531 unsigned int mode) 1532 { 1533 #ifdef CONFIG_64BIT_PHYS_ADDR 1534 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); 1535 #endif 1536 1537 uasm_i_ori(p, pte, pte, mode); 1538 #ifdef CONFIG_SMP 1539 # ifdef CONFIG_64BIT_PHYS_ADDR 1540 if (cpu_has_64bits) 1541 uasm_i_scd(p, pte, 0, ptr); 1542 else 1543 # endif 1544 UASM_i_SC(p, pte, 0, ptr); 1545 1546 if (r10000_llsc_war()) 1547 uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); 1548 else 1549 uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1550 1551 # ifdef CONFIG_64BIT_PHYS_ADDR 1552 if (!cpu_has_64bits) { 1553 /* no uasm_i_nop needed */ 1554 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); 1555 uasm_i_ori(p, pte, pte, hwmode); 1556 uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); 1557 uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1558 /* no uasm_i_nop needed */ 1559 uasm_i_lw(p, pte, 0, ptr); 1560 } else 1561 uasm_i_nop(p); 1562 # else 1563 uasm_i_nop(p); 1564 # endif 1565 #else 1566 # ifdef CONFIG_64BIT_PHYS_ADDR 1567 if (cpu_has_64bits) 1568 uasm_i_sd(p, pte, 0, ptr); 1569 else 1570 # endif 1571 UASM_i_SW(p, pte, 0, ptr); 1572 1573 # ifdef CONFIG_64BIT_PHYS_ADDR 1574 if (!cpu_has_64bits) { 1575 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); 1576 uasm_i_ori(p, pte, pte, hwmode); 1577 uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr); 1578 uasm_i_lw(p, pte, 0, ptr); 1579 } 1580 # endif 1581 #endif 1582 } 1583 1584 /* 1585 * Check if PTE is present, if not then jump to LABEL. 
PTR points to 1586 * the page table where this PTE is located, PTE will be re-loaded 1587 * with it's original value. 1588 */ 1589 static void 1590 build_pte_present(u32 **p, struct uasm_reloc **r, 1591 int pte, int ptr, int scratch, enum label_id lid) 1592 { 1593 int t = scratch >= 0 ? scratch : pte; 1594 1595 if (cpu_has_rixi) { 1596 if (use_bbit_insns()) { 1597 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); 1598 uasm_i_nop(p); 1599 } else { 1600 uasm_i_andi(p, t, pte, _PAGE_PRESENT); 1601 uasm_il_beqz(p, r, t, lid); 1602 if (pte == t) 1603 /* You lose the SMP race :-(*/ 1604 iPTE_LW(p, pte, ptr); 1605 } 1606 } else { 1607 uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ); 1608 uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ); 1609 uasm_il_bnez(p, r, t, lid); 1610 if (pte == t) 1611 /* You lose the SMP race :-(*/ 1612 iPTE_LW(p, pte, ptr); 1613 } 1614 } 1615 1616 /* Make PTE valid, store result in PTR. */ 1617 static void 1618 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, 1619 unsigned int ptr) 1620 { 1621 unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED; 1622 1623 iPTE_SW(p, r, pte, ptr, mode); 1624 } 1625 1626 /* 1627 * Check if PTE can be written to, if not branch to LABEL. Regardless 1628 * restore PTE with value from PTR when done. 1629 */ 1630 static void 1631 build_pte_writable(u32 **p, struct uasm_reloc **r, 1632 unsigned int pte, unsigned int ptr, int scratch, 1633 enum label_id lid) 1634 { 1635 int t = scratch >= 0 ? scratch : pte; 1636 1637 uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE); 1638 uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE); 1639 uasm_il_bnez(p, r, t, lid); 1640 if (pte == t) 1641 /* You lose the SMP race :-(*/ 1642 iPTE_LW(p, pte, ptr); 1643 else 1644 uasm_i_nop(p); 1645 } 1646 1647 /* Make PTE writable, update software status bits as well, then store 1648 * at PTR. 1649 */ 1650 static void 1651 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, 1652 unsigned int ptr) 1653 { 1654 unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID 1655 | _PAGE_DIRTY); 1656 1657 iPTE_SW(p, r, pte, ptr, mode); 1658 } 1659 1660 /* 1661 * Check if PTE can be modified, if not branch to LABEL. Regardless 1662 * restore PTE with value from PTR when done. 1663 */ 1664 static void 1665 build_pte_modifiable(u32 **p, struct uasm_reloc **r, 1666 unsigned int pte, unsigned int ptr, int scratch, 1667 enum label_id lid) 1668 { 1669 if (use_bbit_insns()) { 1670 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid); 1671 uasm_i_nop(p); 1672 } else { 1673 int t = scratch >= 0 ? scratch : pte; 1674 uasm_i_andi(p, t, pte, _PAGE_WRITE); 1675 uasm_il_beqz(p, r, t, lid); 1676 if (pte == t) 1677 /* You lose the SMP race :-(*/ 1678 iPTE_LW(p, pte, ptr); 1679 } 1680 } 1681 1682 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 1683 1684 1685 /* 1686 * R3000 style TLB load/store/modify handlers. 1687 */ 1688 1689 /* 1690 * This places the pte into ENTRYLO0 and writes it with tlbwi. 1691 * Then it returns. 1692 */ 1693 static void 1694 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) 1695 { 1696 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1697 uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ 1698 uasm_i_tlbwi(p); 1699 uasm_i_jr(p, tmp); 1700 uasm_i_rfe(p); /* branch delay */ 1701 } 1702 1703 /* 1704 * This places the pte into ENTRYLO0 and writes it with tlbwi 1705 * or tlbwr as appropriate. This is because the index register 1706 * may have the probe fail bit set as a result of a trap on a 1707 * kseg2 access, i.e. without refill. 
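 *
 * (The probe failure bit is the sign bit of c0_index, so the bltz on
 * the index value below is what selects tlbwr over tlbwi when the
 * probe missed.)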
Then it returns. 1708 */ 1709 static void 1710 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, 1711 struct uasm_reloc **r, unsigned int pte, 1712 unsigned int tmp) 1713 { 1714 uasm_i_mfc0(p, tmp, C0_INDEX); 1715 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1716 uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ 1717 uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */ 1718 uasm_i_tlbwi(p); /* cp0 delay */ 1719 uasm_i_jr(p, tmp); 1720 uasm_i_rfe(p); /* branch delay */ 1721 uasm_l_r3000_write_probe_fail(l, *p); 1722 uasm_i_tlbwr(p); /* cp0 delay */ 1723 uasm_i_jr(p, tmp); 1724 uasm_i_rfe(p); /* branch delay */ 1725 } 1726 1727 static void 1728 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, 1729 unsigned int ptr) 1730 { 1731 long pgdc = (long)pgd_current; 1732 1733 uasm_i_mfc0(p, pte, C0_BADVADDR); 1734 uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */ 1735 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 1736 uasm_i_srl(p, pte, pte, 22); /* load delay */ 1737 uasm_i_sll(p, pte, pte, 2); 1738 uasm_i_addu(p, ptr, ptr, pte); 1739 uasm_i_mfc0(p, pte, C0_CONTEXT); 1740 uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */ 1741 uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ 1742 uasm_i_addu(p, ptr, ptr, pte); 1743 uasm_i_lw(p, pte, 0, ptr); 1744 uasm_i_tlbp(p); /* load delay */ 1745 } 1746 1747 static void build_r3000_tlb_load_handler(void) 1748 { 1749 u32 *p = handle_tlbl; 1750 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; 1751 struct uasm_label *l = labels; 1752 struct uasm_reloc *r = relocs; 1753 1754 memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0])); 1755 memset(labels, 0, sizeof(labels)); 1756 memset(relocs, 0, sizeof(relocs)); 1757 1758 build_r3000_tlbchange_handler_head(&p, K0, K1); 1759 build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); 1760 uasm_i_nop(&p); /* load delay */ 1761 build_make_valid(&p, &r, K0, K1); 1762 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1763 1764 uasm_l_nopage_tlbl(&l, p); 1765 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 1766 uasm_i_nop(&p); 1767 1768 if (p >= handle_tlbl_end) 1769 panic("TLB load handler fastpath space exceeded"); 1770 1771 uasm_resolve_relocs(relocs, labels); 1772 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", 1773 (unsigned int)(p - handle_tlbl)); 1774 1775 dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size); 1776 } 1777 1778 static void build_r3000_tlb_store_handler(void) 1779 { 1780 u32 *p = handle_tlbs; 1781 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; 1782 struct uasm_label *l = labels; 1783 struct uasm_reloc *r = relocs; 1784 1785 memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0])); 1786 memset(labels, 0, sizeof(labels)); 1787 memset(relocs, 0, sizeof(relocs)); 1788 1789 build_r3000_tlbchange_handler_head(&p, K0, K1); 1790 build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); 1791 uasm_i_nop(&p); /* load delay */ 1792 build_make_write(&p, &r, K0, K1); 1793 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1794 1795 uasm_l_nopage_tlbs(&l, p); 1796 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1797 uasm_i_nop(&p); 1798 1799 if (p >= handle_tlbs_end) 1800 panic("TLB store handler fastpath space exceeded"); 1801 1802 uasm_resolve_relocs(relocs, labels); 1803 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", 1804 (unsigned int)(p - handle_tlbs)); 1805 1806 dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size); 1807 } 1808 1809 static void 
build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= handle_tlbm_end)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * R4000 style TLB load/store/modify handlers.
 */
static struct work_registers
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r)
{
	struct work_registers wr = build_get_work_registers(p);

#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
#else
	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
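	 *
	 * As a rough C-level sketch (not literal kernel code) of the
	 * check emitted by build_is_huge_pte(), with pmd_entry standing
	 * for the pmd slot located above:
	 *
	 *	if (*(unsigned long *)pmd_entry & _PAGE_HUGE)
	 *		goto tlb_huge_update;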
1861 */ 1862 build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update); 1863 #endif 1864 1865 UASM_i_MFC0(p, wr.r1, C0_BADVADDR); 1866 UASM_i_LW(p, wr.r2, 0, wr.r2); 1867 UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); 1868 uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); 1869 UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); 1870 1871 #ifdef CONFIG_SMP 1872 uasm_l_smp_pgtable_change(l, *p); 1873 #endif 1874 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ 1875 if (!m4kc_tlbp_war()) 1876 build_tlb_probe_entry(p); 1877 return wr; 1878 } 1879 1880 static void 1881 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, 1882 struct uasm_reloc **r, unsigned int tmp, 1883 unsigned int ptr) 1884 { 1885 uasm_i_ori(p, ptr, ptr, sizeof(pte_t)); 1886 uasm_i_xori(p, ptr, ptr, sizeof(pte_t)); 1887 build_update_entries(p, tmp, ptr); 1888 build_tlb_write_entry(p, l, r, tlb_indexed); 1889 uasm_l_leave(l, *p); 1890 build_restore_work_registers(p); 1891 uasm_i_eret(p); /* return from trap */ 1892 1893 #ifdef CONFIG_64BIT 1894 build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill); 1895 #endif 1896 } 1897 1898 static void build_r4000_tlb_load_handler(void) 1899 { 1900 u32 *p = handle_tlbl; 1901 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; 1902 struct uasm_label *l = labels; 1903 struct uasm_reloc *r = relocs; 1904 struct work_registers wr; 1905 1906 memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0])); 1907 memset(labels, 0, sizeof(labels)); 1908 memset(relocs, 0, sizeof(relocs)); 1909 1910 if (bcm1250_m3_war()) { 1911 unsigned int segbits = 44; 1912 1913 uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1914 uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 1915 uasm_i_xor(&p, K0, K0, K1); 1916 uasm_i_dsrl_safe(&p, K1, K0, 62); 1917 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 1918 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 1919 uasm_i_or(&p, K0, K0, K1); 1920 uasm_il_bnez(&p, &r, K0, label_leave); 1921 /* No need for uasm_i_nop */ 1922 } 1923 1924 wr = build_r4000_tlbchange_handler_head(&p, &l, &r); 1925 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); 1926 if (m4kc_tlbp_war()) 1927 build_tlb_probe_entry(&p); 1928 1929 if (cpu_has_rixi && !cpu_has_rixiex) { 1930 /* 1931 * If the page is not _PAGE_VALID, RI or XI could not 1932 * have triggered it. Skip the expensive test.. 1933 */ 1934 if (use_bbit_insns()) { 1935 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), 1936 label_tlbl_goaround1); 1937 } else { 1938 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); 1939 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1); 1940 } 1941 uasm_i_nop(&p); 1942 1943 uasm_i_tlbr(&p); 1944 1945 switch (current_cpu_type()) { 1946 default: 1947 if (cpu_has_mips_r2) { 1948 uasm_i_ehb(&p); 1949 1950 case CPU_CAVIUM_OCTEON: 1951 case CPU_CAVIUM_OCTEON_PLUS: 1952 case CPU_CAVIUM_OCTEON2: 1953 break; 1954 } 1955 } 1956 1957 /* Examine entrylo 0 or 1 based on ptr. */ 1958 if (use_bbit_insns()) { 1959 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); 1960 } else { 1961 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); 1962 uasm_i_beqz(&p, wr.r3, 8); 1963 } 1964 /* load it in the delay slot*/ 1965 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); 1966 /* load it if ptr is odd */ 1967 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); 1968 /* 1969 * If the entryLo (now in wr.r3) is valid (bit 1), RI or 1970 * XI must have triggered it. 
static void build_r4000_tlb_load_handler(void)
{
        u32 *p = handle_tlbl;
        const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        struct work_registers wr;

        memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        if (bcm1250_m3_war()) {
                unsigned int segbits = 44;

                uasm_i_dmfc0(&p, K0, C0_BADVADDR);
                uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
                uasm_i_xor(&p, K0, K0, K1);
                uasm_i_dsrl_safe(&p, K1, K0, 62);
                uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
                uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
                uasm_i_or(&p, K0, K0, K1);
                uasm_il_bnez(&p, &r, K0, label_leave);
                /* No need for uasm_i_nop */
        }

        wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
        build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);

        if (cpu_has_rixi && !cpu_has_rixiex) {
                /*
                 * If the page is not _PAGE_VALID, RI or XI could not
                 * have triggered it.  Skip the expensive test.
                 */
                if (use_bbit_insns()) {
                        uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
                                      label_tlbl_goaround1);
                } else {
                        uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
                        uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
                }
                uasm_i_nop(&p);

                uasm_i_tlbr(&p);

                /* The odd switch skips the ehb hazard barrier on Octeon. */
                switch (current_cpu_type()) {
                default:
                        if (cpu_has_mips_r2) {
                                uasm_i_ehb(&p);

                case CPU_CAVIUM_OCTEON:
                case CPU_CAVIUM_OCTEON_PLUS:
                case CPU_CAVIUM_OCTEON2:
                                break;
                        }
                }

                /* Examine entrylo 0 or 1 based on ptr. */
                if (use_bbit_insns()) {
                        uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
                } else {
                        uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
                        uasm_i_beqz(&p, wr.r3, 8);
                }
                /* load it in the delay slot */
                UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
                /* load it if ptr is odd */
                UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
                /*
                 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
                 * XI must have triggered it.
                 */
                if (use_bbit_insns()) {
                        uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
                        uasm_i_nop(&p);
                        uasm_l_tlbl_goaround1(&l, p);
                } else {
                        uasm_i_andi(&p, wr.r3, wr.r3, 2);
                        uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
                        uasm_i_nop(&p);
                }
                uasm_l_tlbl_goaround1(&l, p);
        }
        build_make_valid(&p, &r, wr.r1, wr.r2);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /*
         * This is the entry point when build_r4000_tlbchange_handler_head
         * spots a huge page.
         */
        uasm_l_tlb_huge_update(&l, p);
        iPTE_LW(&p, wr.r1, wr.r2);
        build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
        build_tlb_probe_entry(&p);

        if (cpu_has_rixi && !cpu_has_rixiex) {
                /*
                 * If the page is not _PAGE_VALID, RI or XI could not
                 * have triggered it.  Skip the expensive test.
                 */
                if (use_bbit_insns()) {
                        uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
                                      label_tlbl_goaround2);
                } else {
                        uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
                        uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
                }
                uasm_i_nop(&p);

                uasm_i_tlbr(&p);

                switch (current_cpu_type()) {
                default:
                        if (cpu_has_mips_r2) {
                                uasm_i_ehb(&p);

                case CPU_CAVIUM_OCTEON:
                case CPU_CAVIUM_OCTEON_PLUS:
                case CPU_CAVIUM_OCTEON2:
                                break;
                        }
                }

                /* Examine entrylo 0 or 1 based on ptr. */
                if (use_bbit_insns()) {
                        uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
                } else {
                        uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
                        uasm_i_beqz(&p, wr.r3, 8);
                }
                /* load it in the delay slot */
                UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
                /* load it if ptr is odd */
                UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
                /*
                 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
                 * XI must have triggered it.
                 */
                if (use_bbit_insns()) {
                        uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
                } else {
                        uasm_i_andi(&p, wr.r3, wr.r3, 2);
                        uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
                }
                if (PM_DEFAULT_MASK == 0)
                        uasm_i_nop(&p);
                /*
                 * We clobbered C0_PAGEMASK, restore it.  On the other branch
                 * it is restored in build_huge_tlb_write_entry.
                 */
                build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);

                uasm_l_tlbl_goaround2(&l, p);
        }
        uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
        build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

        uasm_l_nopage_tlbl(&l, p);
        build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
        if ((unsigned long)tlb_do_page_fault_0 & 1) {
                uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
                uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
                uasm_i_jr(&p, K0);
        } else
#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
        uasm_i_nop(&p);

        if (p >= handle_tlbl_end)
                panic("TLB load handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbl));

        dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
}
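
/*
 * The store handler (TLBS) follows the same pattern but checks
 * writability instead: build_pte_writable() sends non-present or
 * read-only pages to do_page_fault, and the fastpath sets the
 * accessed, modified, valid and dirty bits in one go.  No RI/XI test
 * is needed here since those bits only affect loads and instruction
 * fetches.
 */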
static void build_r4000_tlb_store_handler(void)
{
        u32 *p = handle_tlbs;
        const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        struct work_registers wr;

        memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
        build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
        build_make_write(&p, &r, wr.r1, wr.r2);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /*
         * This is the entry point when
         * build_r4000_tlbchange_handler_head spots a huge page.
         */
        uasm_l_tlb_huge_update(&l, p);
        iPTE_LW(&p, wr.r1, wr.r2);
        build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
        build_tlb_probe_entry(&p);
        uasm_i_ori(&p, wr.r1, wr.r1,
                   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
        build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

        uasm_l_nopage_tlbs(&l, p);
        build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
        if ((unsigned long)tlb_do_page_fault_1 & 1) {
                uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
                uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
                uasm_i_jr(&p, K0);
        } else
#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);

        if (p >= handle_tlbs_end)
                panic("TLB store handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbs));

        dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
}
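
/*
 * The modify handler runs for the TLB modified exception, i.e. a store
 * hit a TLB entry whose dirty bit is clear.  The PTE is therefore
 * already present; build_pte_modifiable() only has to check that the
 * page is marked writable before the dirty/modified bits are set.
 */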
static void build_r4000_tlb_modify_handler(void)
{
        u32 *p = handle_tlbm;
        const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        struct work_registers wr;

        memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
        build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
        /* Present and writable bits set, set accessed and dirty bits. */
        build_make_write(&p, &r, wr.r1, wr.r2);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /*
         * This is the entry point when
         * build_r4000_tlbchange_handler_head spots a huge page.
         */
        uasm_l_tlb_huge_update(&l, p);
        iPTE_LW(&p, wr.r1, wr.r2);
        build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
        build_tlb_probe_entry(&p);
        uasm_i_ori(&p, wr.r1, wr.r1,
                   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
        build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

        uasm_l_nopage_tlbm(&l, p);
        build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
        if ((unsigned long)tlb_do_page_fault_1 & 1) {
                uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
                uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
                uasm_i_jr(&p, K0);
        } else
#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);

        if (p >= handle_tlbm_end)
                panic("TLB modify handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbm));

        dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
}
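
/*
 * The handlers above were written through the data side; flush the
 * corresponding ranges so the instruction cache sees the new code
 * before the handlers are used.
 */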
static void flush_tlb_handlers(void)
{
        local_flush_icache_range((unsigned long)handle_tlbl,
                                 (unsigned long)handle_tlbl_end);
        local_flush_icache_range((unsigned long)handle_tlbs,
                                 (unsigned long)handle_tlbs_end);
        local_flush_icache_range((unsigned long)handle_tlbm,
                                 (unsigned long)handle_tlbm_end);
        local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
                                 (unsigned long)tlbmiss_handler_setup_pgd_end);
}

static void print_htw_config(void)
{
        unsigned long config;
        unsigned int pwctl;
        const int field = 2 * sizeof(unsigned long);

        config = read_c0_pwfield();
        pr_debug("PWField (0x%0*lx): GDI: 0x%02lx UDI: 0x%02lx MDI: 0x%02lx PTI: 0x%02lx PTEI: 0x%02lx\n",
                 field, config,
                 (config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
                 (config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
                 (config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
                 (config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
                 (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);

        config = read_c0_pwsize();
        pr_debug("PWSize (0x%0*lx): GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
                 field, config,
                 (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
                 (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
                 (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
                 (config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
                 (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);

        pwctl = read_c0_pwctl();
        pr_debug("PWCtl (0x%x): PWEn: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
                 pwctl,
                 (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
                 (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
                 (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
                 (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
}
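
/*
 * For illustration only (the actual values depend on the configured
 * page size and PTE width): with 4 KiB pages the PTI index programmed
 * below works out to PAGE_SHIFT == 12, GDI to PGDIR_SHIFT, and the
 * PWSize walker widths to ilog2(PTRS_PER_PGD) and ilog2(PTRS_PER_PTE),
 * e.g. 9 for a page holding 512 eight-byte PTEs.
 */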
static void config_htw_params(void)
{
        unsigned long pwfield, pwsize, ptei;
        unsigned int config;

        /*
         * We are using 2-level page tables, so we only need to
         * setup GDW and PTW appropriately. UDW and MDW will remain 0.
         * The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to
         * write values less than 0xc in these fields because the entire
         * write will be dropped. As a result, we must preserve the
         * original reset values and overwrite only what we really want.
         */

        pwfield = read_c0_pwfield();
        /* re-initialize the GDI field */
        pwfield &= ~MIPS_PWFIELD_GDI_MASK;
        pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
        /* re-initialize the PTI field including the even/odd bit */
        pwfield &= ~MIPS_PWFIELD_PTI_MASK;
        pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
        /* Set the PTEI right shift */
        ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
        pwfield |= ptei;
        write_c0_pwfield(pwfield);
        /* Check whether the PTEI value is supported */
        back_to_back_c0_hazard();
        pwfield = read_c0_pwfield();
        if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
            != ptei) {
                pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled\n",
                        ptei);
                /*
                 * Drop option to avoid HTW being enabled via another path
                 * (eg htw_reset())
                 */
                current_cpu_data.options &= ~MIPS_CPU_HTW;
                return;
        }

        pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
        pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
        write_c0_pwsize(pwsize);

        /* Make sure everything is set before we enable the HTW */
        back_to_back_c0_hazard();

        /* Enable HTW and disable the rest of the pwctl fields */
        config = 1 << MIPS_PWCTL_PWEN_SHIFT;
        write_c0_pwctl(config);
        pr_info("Hardware Page Table Walker enabled\n");

        print_htw_config();
}

void build_tlb_refill_handler(void)
{
        /*
         * The refill handler is generated per-CPU; multi-node systems
         * may have local storage for it. The other handlers are only
         * needed once.
         */
        static int run_once = 0;

        output_pgtable_bits_defines();

#ifdef CONFIG_64BIT
        check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

        switch (current_cpu_type()) {
        case CPU_R2000:
        case CPU_R3000:
        case CPU_R3000A:
        case CPU_R3081E:
        case CPU_TX3912:
        case CPU_TX3922:
        case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
                if (cpu_has_local_ebase)
                        build_r3000_tlb_refill_handler();
                if (!run_once) {
                        if (!cpu_has_local_ebase)
                                build_r3000_tlb_refill_handler();
                        build_setup_pgd();
                        build_r3000_tlb_load_handler();
                        build_r3000_tlb_store_handler();
                        build_r3000_tlb_modify_handler();
                        flush_tlb_handlers();
                        run_once++;
                }
#else
                panic("No R3000 TLB refill handler");
#endif
                break;

        case CPU_R6000:
        case CPU_R6000A:
                panic("No R6000 TLB refill handler yet");
                break;

        case CPU_R8000:
                panic("No R8000 TLB refill handler yet");
                break;

        default:
                if (!run_once) {
                        scratch_reg = allocate_kscratch();
                        build_setup_pgd();
                        build_r4000_tlb_load_handler();
                        build_r4000_tlb_store_handler();
                        build_r4000_tlb_modify_handler();
                        if (!cpu_has_local_ebase)
                                build_r4000_tlb_refill_handler();
                        flush_tlb_handlers();
                        run_once++;
                }
                if (cpu_has_local_ebase)
                        build_r4000_tlb_refill_handler();
                if (cpu_has_htw)
                        config_htw_params();
        }
}