/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 * Copyright (C) 2011  MIPS Technologies, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/cache.h>

#include <asm/cacheflush.h>
#include <asm/cpu-type.h>
#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
#include <asm/setup.h>

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

struct work_registers {
	int r1;
	int r2;
	int r3;
};

struct tlb_reg_save {
	unsigned long a;
	unsigned long b;
} ____cacheline_aligned_in_smp;

static struct tlb_reg_save handler_reg_save[NR_CPUS];

static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

static int use_bbit_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		return 1;
	default:
		return 0;
	}
}

static int use_lwx_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		return 1;
	default:
		return 0;
	}
}
#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
static bool scratchpad_available(void)
{
	return true;
}
static int scratchpad_offset(int i)
{
	/*
	 * CVMSEG starts at address -32768 and extends for
	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
	 */
	i += 1; /* Kernel use starts at the top and works down. */
	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
}
#else
static bool scratchpad_available(void)
{
	return false;
}
static int scratchpad_offset(int i)
{
	BUG();
	/* Really unreachable, but evidently some GCC versions want this. */
	return 0;
}
#endif
/*
 * Found by experiment: At least some revisions of the 4kc throw under
 * some circumstances a machine check exception, triggered by invalid
 * values in the index register.
Delaying the tlbp instruction until 136 * after the next branch, plus adding an additional nop in front of 137 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows 138 * why; it's not an issue caused by the core RTL. 139 * 140 */ 141 static int m4kc_tlbp_war(void) 142 { 143 return (current_cpu_data.processor_id & 0xffff00) == 144 (PRID_COMP_MIPS | PRID_IMP_4KC); 145 } 146 147 /* Handle labels (which must be positive integers). */ 148 enum label_id { 149 label_second_part = 1, 150 label_leave, 151 label_vmalloc, 152 label_vmalloc_done, 153 label_tlbw_hazard_0, 154 label_split = label_tlbw_hazard_0 + 8, 155 label_tlbl_goaround1, 156 label_tlbl_goaround2, 157 label_nopage_tlbl, 158 label_nopage_tlbs, 159 label_nopage_tlbm, 160 label_smp_pgtable_change, 161 label_r3000_write_probe_fail, 162 label_large_segbits_fault, 163 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 164 label_tlb_huge_update, 165 #endif 166 }; 167 168 UASM_L_LA(_second_part) 169 UASM_L_LA(_leave) 170 UASM_L_LA(_vmalloc) 171 UASM_L_LA(_vmalloc_done) 172 /* _tlbw_hazard_x is handled differently. */ 173 UASM_L_LA(_split) 174 UASM_L_LA(_tlbl_goaround1) 175 UASM_L_LA(_tlbl_goaround2) 176 UASM_L_LA(_nopage_tlbl) 177 UASM_L_LA(_nopage_tlbs) 178 UASM_L_LA(_nopage_tlbm) 179 UASM_L_LA(_smp_pgtable_change) 180 UASM_L_LA(_r3000_write_probe_fail) 181 UASM_L_LA(_large_segbits_fault) 182 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 183 UASM_L_LA(_tlb_huge_update) 184 #endif 185 186 static int hazard_instance; 187 188 static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance) 189 { 190 switch (instance) { 191 case 0 ... 7: 192 uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance); 193 return; 194 default: 195 BUG(); 196 } 197 } 198 199 static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance) 200 { 201 switch (instance) { 202 case 0 ... 7: 203 uasm_build_label(l, *p, label_tlbw_hazard_0 + instance); 204 break; 205 default: 206 BUG(); 207 } 208 } 209 210 /* 211 * pgtable bits are assigned dynamically depending on processor feature 212 * and statically based on kernel configuration. This spits out the actual 213 * values the kernel is using. Required to make sense from disassembled 214 * TLB exception handlers. 215 */ 216 static void output_pgtable_bits_defines(void) 217 { 218 #define pr_define(fmt, ...) 
\ 219 pr_debug("#define " fmt, ##__VA_ARGS__) 220 221 pr_debug("#include <asm/asm.h>\n"); 222 pr_debug("#include <asm/regdef.h>\n"); 223 pr_debug("\n"); 224 225 pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT); 226 pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT); 227 pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT); 228 pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT); 229 pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT); 230 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 231 pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT); 232 pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT); 233 #endif 234 if (cpu_has_rixi) { 235 #ifdef _PAGE_NO_EXEC_SHIFT 236 pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); 237 #endif 238 #ifdef _PAGE_NO_READ_SHIFT 239 pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); 240 #endif 241 } 242 pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); 243 pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT); 244 pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT); 245 pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT); 246 pr_debug("\n"); 247 } 248 249 static inline void dump_handler(const char *symbol, const u32 *handler, int count) 250 { 251 int i; 252 253 pr_debug("LEAF(%s)\n", symbol); 254 255 pr_debug("\t.set push\n"); 256 pr_debug("\t.set noreorder\n"); 257 258 for (i = 0; i < count; i++) 259 pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]); 260 261 pr_debug("\t.set\tpop\n"); 262 263 pr_debug("\tEND(%s)\n", symbol); 264 } 265 266 /* The only general purpose registers allowed in TLB handlers. */ 267 #define K0 26 268 #define K1 27 269 270 /* Some CP0 registers */ 271 #define C0_INDEX 0, 0 272 #define C0_ENTRYLO0 2, 0 273 #define C0_TCBIND 2, 2 274 #define C0_ENTRYLO1 3, 0 275 #define C0_CONTEXT 4, 0 276 #define C0_PAGEMASK 5, 0 277 #define C0_BADVADDR 8, 0 278 #define C0_ENTRYHI 10, 0 279 #define C0_EPC 14, 0 280 #define C0_XCONTEXT 20, 0 281 282 #ifdef CONFIG_64BIT 283 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) 284 #else 285 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT) 286 #endif 287 288 /* The worst case length of the handler is around 18 instructions for 289 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs. 290 * Maximum space available is 32 instructions for R3000 and 64 291 * instructions for R4000. 292 * 293 * We deliberately chose a buffer size of 128, so we won't scribble 294 * over anything important on overflow before we panic. 
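 *
 * (MIPS instructions are 4 bytes, so the 128-word buffer is 512 bytes:
 * twice the 64-instruction R4000 slot and four times the
 * 32-instruction R3000 slot.)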
295 */ 296 static u32 tlb_handler[128]; 297 298 /* simply assume worst case size for labels and relocs */ 299 static struct uasm_label labels[128]; 300 static struct uasm_reloc relocs[128]; 301 302 static int check_for_high_segbits; 303 304 static unsigned int kscratch_used_mask; 305 306 static inline int __maybe_unused c0_kscratch(void) 307 { 308 switch (current_cpu_type()) { 309 case CPU_XLP: 310 case CPU_XLR: 311 return 22; 312 default: 313 return 31; 314 } 315 } 316 317 static int allocate_kscratch(void) 318 { 319 int r; 320 unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask; 321 322 r = ffs(a); 323 324 if (r == 0) 325 return -1; 326 327 r--; /* make it zero based */ 328 329 kscratch_used_mask |= (1 << r); 330 331 return r; 332 } 333 334 static int scratch_reg; 335 static int pgd_reg; 336 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; 337 338 static struct work_registers build_get_work_registers(u32 **p) 339 { 340 struct work_registers r; 341 342 if (scratch_reg >= 0) { 343 /* Save in CPU local C0_KScratch? */ 344 UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg); 345 r.r1 = K0; 346 r.r2 = K1; 347 r.r3 = 1; 348 return r; 349 } 350 351 if (num_possible_cpus() > 1) { 352 /* Get smp_processor_id */ 353 UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG); 354 UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT); 355 356 /* handler_reg_save index in K0 */ 357 UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); 358 359 UASM_i_LA(p, K1, (long)&handler_reg_save); 360 UASM_i_ADDU(p, K0, K0, K1); 361 } else { 362 UASM_i_LA(p, K0, (long)&handler_reg_save); 363 } 364 /* K0 now points to save area, save $1 and $2 */ 365 UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); 366 UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0); 367 368 r.r1 = K1; 369 r.r2 = 1; 370 r.r3 = 2; 371 return r; 372 } 373 374 static void build_restore_work_registers(u32 **p) 375 { 376 if (scratch_reg >= 0) { 377 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 378 return; 379 } 380 /* K0 already points to save area, restore $1 and $2 */ 381 UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); 382 UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); 383 } 384 385 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 386 387 /* 388 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current, 389 * we cannot do r3000 under these circumstances. 390 * 391 * Declare pgd_current here instead of including mmu_context.h to avoid type 392 * conflicts for tlbmiss_handler_setup_pgd 393 */ 394 extern unsigned long pgd_current[]; 395 396 /* 397 * The R3000 TLB handler is simple. 
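 * It fits entirely in the 32-instruction refill slot, so unlike the
 * R4000 handler further down it needs no relocation.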
 */
static void build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with space for 32 instructions each.
 * Since they aren't used at the same time, we can overflow into the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64];

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB. The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it a NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed. This erratum is also present on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
489 */ 490 enum tlb_write_entry { tlb_random, tlb_indexed }; 491 492 static void build_tlb_write_entry(u32 **p, struct uasm_label **l, 493 struct uasm_reloc **r, 494 enum tlb_write_entry wmode) 495 { 496 void(*tlbw)(u32 **) = NULL; 497 498 switch (wmode) { 499 case tlb_random: tlbw = uasm_i_tlbwr; break; 500 case tlb_indexed: tlbw = uasm_i_tlbwi; break; 501 } 502 503 if (cpu_has_mips_r2) { 504 /* 505 * The architecture spec says an ehb is required here, 506 * but a number of cores do not have the hazard and 507 * using an ehb causes an expensive pipeline stall. 508 */ 509 switch (current_cpu_type()) { 510 case CPU_M14KC: 511 case CPU_74K: 512 case CPU_1074K: 513 case CPU_PROAPTIV: 514 case CPU_P5600: 515 case CPU_M5150: 516 break; 517 518 default: 519 uasm_i_ehb(p); 520 break; 521 } 522 tlbw(p); 523 return; 524 } 525 526 switch (current_cpu_type()) { 527 case CPU_R4000PC: 528 case CPU_R4000SC: 529 case CPU_R4000MC: 530 case CPU_R4400PC: 531 case CPU_R4400SC: 532 case CPU_R4400MC: 533 /* 534 * This branch uses up a mtc0 hazard nop slot and saves 535 * two nops after the tlbw instruction. 536 */ 537 uasm_bgezl_hazard(p, r, hazard_instance); 538 tlbw(p); 539 uasm_bgezl_label(l, p, hazard_instance); 540 hazard_instance++; 541 uasm_i_nop(p); 542 break; 543 544 case CPU_R4600: 545 case CPU_R4700: 546 uasm_i_nop(p); 547 tlbw(p); 548 uasm_i_nop(p); 549 break; 550 551 case CPU_R5000: 552 case CPU_NEVADA: 553 uasm_i_nop(p); /* QED specifies 2 nops hazard */ 554 uasm_i_nop(p); /* QED specifies 2 nops hazard */ 555 tlbw(p); 556 break; 557 558 case CPU_R4300: 559 case CPU_5KC: 560 case CPU_TX49XX: 561 case CPU_PR4450: 562 case CPU_XLR: 563 uasm_i_nop(p); 564 tlbw(p); 565 break; 566 567 case CPU_R10000: 568 case CPU_R12000: 569 case CPU_R14000: 570 case CPU_4KC: 571 case CPU_4KEC: 572 case CPU_M14KC: 573 case CPU_M14KEC: 574 case CPU_SB1: 575 case CPU_SB1A: 576 case CPU_4KSC: 577 case CPU_20KC: 578 case CPU_25KF: 579 case CPU_BMIPS32: 580 case CPU_BMIPS3300: 581 case CPU_BMIPS4350: 582 case CPU_BMIPS4380: 583 case CPU_BMIPS5000: 584 case CPU_LOONGSON2: 585 case CPU_LOONGSON3: 586 case CPU_R5500: 587 if (m4kc_tlbp_war()) 588 uasm_i_nop(p); 589 case CPU_ALCHEMY: 590 tlbw(p); 591 break; 592 593 case CPU_RM7000: 594 uasm_i_nop(p); 595 uasm_i_nop(p); 596 uasm_i_nop(p); 597 uasm_i_nop(p); 598 tlbw(p); 599 break; 600 601 case CPU_VR4111: 602 case CPU_VR4121: 603 case CPU_VR4122: 604 case CPU_VR4181: 605 case CPU_VR4181A: 606 uasm_i_nop(p); 607 uasm_i_nop(p); 608 tlbw(p); 609 uasm_i_nop(p); 610 uasm_i_nop(p); 611 break; 612 613 case CPU_VR4131: 614 case CPU_VR4133: 615 case CPU_R5432: 616 uasm_i_nop(p); 617 uasm_i_nop(p); 618 tlbw(p); 619 break; 620 621 case CPU_JZRISC: 622 tlbw(p); 623 uasm_i_nop(p); 624 break; 625 626 default: 627 panic("No TLB refill handler yet (CPU type: %d)", 628 current_cpu_type()); 629 break; 630 } 631 } 632 633 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, 634 unsigned int reg) 635 { 636 if (cpu_has_rixi) { 637 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); 638 } else { 639 #ifdef CONFIG_64BIT_PHYS_ADDR 640 uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL)); 641 #else 642 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL)); 643 #endif 644 } 645 } 646 647 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 648 649 static void build_restore_pagemask(u32 **p, struct uasm_reloc **r, 650 unsigned int tmp, enum label_id lid, 651 int restore_scratch) 652 { 653 if (restore_scratch) { 654 /* Reset default page size */ 655 if (PM_DEFAULT_MASK >> 16) { 656 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); 
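			/*
			 * The lui above and the ori below together
			 * materialize the full 32-bit PM_DEFAULT_MASK
			 * before it is written to c0_pagemask.
			 */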
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else {
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		}
		if (scratch_reg >= 0)
			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
		else
			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
	} else {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else {
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
		}
	}
}

static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
				       struct uasm_reloc **r,
				       unsigned int tmp,
				       enum tlb_write_entry wmode,
				       int restore_scratch)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
}

/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		  unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	if (use_bbit_insns()) {
		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
	} else {
		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
		uasm_il_bnez(p, r, tmp, lid);
	}
}

static void build_huge_update_entries(u32 **p, unsigned int pte,
				      unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

	/* We can clobber tmp.  It isn't used after this. */
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	build_convert_pte_to_entrylo(p, pte);
	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}

static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
				    struct uasm_label **l,
				    unsigned int pte,
				    unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif
	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);

	if (check_for_high_segbits) {
		/*
		 * The kernel currently implicitly assumes that the
		 * MIPS SEGBITS parameter for the processor is
		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
		 * allocate virtual addresses outside the maximum
		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
		 * that doesn't prevent user code from accessing the
		 * higher xuseg addresses.  Here, we make sure that
		 * everything but the lower xuseg addresses goes down
		 * the module_alloc/vmalloc path.
		 */
		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, ptr, label_vmalloc);
	} else {
		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

	if (pgd_reg != -1) {
		/* pgd is in pgd_reg */
		UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
	} else {
#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
		/*
		 * &pgd << 11 stored in CONTEXT [23..63].
		 */
		UASM_i_MFC0(p, ptr, C0_CONTEXT);

		/* Clear lower 23 bits of context. */
		uasm_i_dins(p, ptr, 0, 0, 23);

		/* 1 0 1 0 1 << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
#elif defined(CONFIG_SMP)
		UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
		uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
		UASM_i_LA_mostly(p, tmp, pgdc);
		uasm_i_daddu(p, ptr, ptr, tmp);
		uasm_i_dmfc0(p, tmp, C0_BADVADDR);
		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
		UASM_i_LA_mostly(p, ptr, pgdc);
		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif
	}

	uasm_l_vmalloc_done(l, *p);

	/* get pgd offset in bytes */
	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
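 * (It is taken from swapper_pg_dir rather than from the pgd_current /
 * CONTEXT value used for the user-space fast path.)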
854 */ 855 static void 856 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 857 unsigned int bvaddr, unsigned int ptr, 858 enum vmalloc64_mode mode) 859 { 860 long swpd = (long)swapper_pg_dir; 861 int single_insn_swpd; 862 int did_vmalloc_branch = 0; 863 864 single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd); 865 866 uasm_l_vmalloc(l, *p); 867 868 if (mode != not_refill && check_for_high_segbits) { 869 if (single_insn_swpd) { 870 uasm_il_bltz(p, r, bvaddr, label_vmalloc_done); 871 uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); 872 did_vmalloc_branch = 1; 873 /* fall through */ 874 } else { 875 uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault); 876 } 877 } 878 if (!did_vmalloc_branch) { 879 if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) { 880 uasm_il_b(p, r, label_vmalloc_done); 881 uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); 882 } else { 883 UASM_i_LA_mostly(p, ptr, swpd); 884 uasm_il_b(p, r, label_vmalloc_done); 885 if (uasm_in_compat_space_p(swpd)) 886 uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd)); 887 else 888 uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd)); 889 } 890 } 891 if (mode != not_refill && check_for_high_segbits) { 892 uasm_l_large_segbits_fault(l, *p); 893 /* 894 * We get here if we are an xsseg address, or if we are 895 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary. 896 * 897 * Ignoring xsseg (assume disabled so would generate 898 * (address errors?), the only remaining possibility 899 * is the upper xuseg addresses. On processors with 900 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these 901 * addresses would have taken an address error. We try 902 * to mimic that here by taking a load/istream page 903 * fault. 904 */ 905 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); 906 uasm_i_jr(p, ptr); 907 908 if (mode == refill_scratch) { 909 if (scratch_reg >= 0) 910 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 911 else 912 UASM_i_LW(p, 1, scratchpad_offset(0), 0); 913 } else { 914 uasm_i_nop(p); 915 } 916 } 917 } 918 919 #else /* !CONFIG_64BIT */ 920 921 /* 922 * TMP and PTR are scratch. 923 * TMP will be clobbered, PTR will hold the pgd entry. 924 */ 925 static void __maybe_unused 926 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) 927 { 928 if (pgd_reg != -1) { 929 /* pgd is in pgd_reg */ 930 uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg); 931 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 932 } else { 933 long pgdc = (long)pgd_current; 934 935 /* 32 bit SMP has smp_processor_id() stored in CONTEXT. 
*/ 936 #ifdef CONFIG_SMP 937 uasm_i_mfc0(p, ptr, SMP_CPUID_REG); 938 UASM_i_LA_mostly(p, tmp, pgdc); 939 uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT); 940 uasm_i_addu(p, ptr, tmp, ptr); 941 #else 942 UASM_i_LA_mostly(p, ptr, pgdc); 943 #endif 944 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 945 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 946 } 947 uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ 948 uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); 949 uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ 950 } 951 952 #endif /* !CONFIG_64BIT */ 953 954 static void build_adjust_context(u32 **p, unsigned int ctx) 955 { 956 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; 957 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); 958 959 switch (current_cpu_type()) { 960 case CPU_VR41XX: 961 case CPU_VR4111: 962 case CPU_VR4121: 963 case CPU_VR4122: 964 case CPU_VR4131: 965 case CPU_VR4181: 966 case CPU_VR4181A: 967 case CPU_VR4133: 968 shift += 2; 969 break; 970 971 default: 972 break; 973 } 974 975 if (shift) 976 UASM_i_SRL(p, ctx, ctx, shift); 977 uasm_i_andi(p, ctx, ctx, mask); 978 } 979 980 static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) 981 { 982 /* 983 * Bug workaround for the Nevada. It seems as if under certain 984 * circumstances the move from cp0_context might produce a 985 * bogus result when the mfc0 instruction and its consumer are 986 * in a different cacheline or a load instruction, probably any 987 * memory reference, is between them. 988 */ 989 switch (current_cpu_type()) { 990 case CPU_NEVADA: 991 UASM_i_LW(p, ptr, 0, ptr); 992 GET_CONTEXT(p, tmp); /* get context reg */ 993 break; 994 995 default: 996 GET_CONTEXT(p, tmp); /* get context reg */ 997 UASM_i_LW(p, ptr, 0, ptr); 998 break; 999 } 1000 1001 build_adjust_context(p, tmp); 1002 UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ 1003 } 1004 1005 static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) 1006 { 1007 /* 1008 * 64bit address support (36bit on a 32bit CPU) in a 32bit 1009 * Kernel is a special case. Only a few CPUs use it. 
1010 */ 1011 #ifdef CONFIG_64BIT_PHYS_ADDR 1012 if (cpu_has_64bits) { 1013 uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ 1014 uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 1015 if (cpu_has_rixi) { 1016 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1017 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1018 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); 1019 } else { 1020 uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ 1021 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1022 uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ 1023 } 1024 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1025 } else { 1026 int pte_off_even = sizeof(pte_t) / 2; 1027 int pte_off_odd = pte_off_even + sizeof(pte_t); 1028 1029 /* The pte entries are pre-shifted */ 1030 uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */ 1031 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1032 uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ 1033 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1034 } 1035 #else 1036 UASM_i_LW(p, tmp, 0, ptep); /* get even pte */ 1037 UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 1038 if (r45k_bvahwbug()) 1039 build_tlb_probe_entry(p); 1040 if (cpu_has_rixi) { 1041 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1042 if (r4k_250MHZhwbug()) 1043 UASM_i_MTC0(p, 0, C0_ENTRYLO0); 1044 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1045 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); 1046 } else { 1047 UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ 1048 if (r4k_250MHZhwbug()) 1049 UASM_i_MTC0(p, 0, C0_ENTRYLO0); 1050 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1051 UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ 1052 if (r45k_bvahwbug()) 1053 uasm_i_mfc0(p, tmp, C0_INDEX); 1054 } 1055 if (r4k_250MHZhwbug()) 1056 UASM_i_MTC0(p, 0, C0_ENTRYLO1); 1057 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1058 #endif 1059 } 1060 1061 struct mips_huge_tlb_info { 1062 int huge_pte; 1063 int restore_scratch; 1064 }; 1065 1066 static struct mips_huge_tlb_info 1067 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, 1068 struct uasm_reloc **r, unsigned int tmp, 1069 unsigned int ptr, int c0_scratch_reg) 1070 { 1071 struct mips_huge_tlb_info rv; 1072 unsigned int even, odd; 1073 int vmalloc_branch_delay_filled = 0; 1074 const int scratch = 1; /* Our extra working register */ 1075 1076 rv.huge_pte = scratch; 1077 rv.restore_scratch = 0; 1078 1079 if (check_for_high_segbits) { 1080 UASM_i_MFC0(p, tmp, C0_BADVADDR); 1081 1082 if (pgd_reg != -1) 1083 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); 1084 else 1085 UASM_i_MFC0(p, ptr, C0_CONTEXT); 1086 1087 if (c0_scratch_reg >= 0) 1088 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); 1089 else 1090 UASM_i_SW(p, scratch, scratchpad_offset(0), 0); 1091 1092 uasm_i_dsrl_safe(p, scratch, tmp, 1093 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); 1094 uasm_il_bnez(p, r, scratch, label_vmalloc); 1095 1096 if (pgd_reg == -1) { 1097 vmalloc_branch_delay_filled = 1; 1098 /* Clear lower 23 bits of context. 
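			   CONTEXT holds &pgd << 11 in bits 23..63; see build_get_pmde64.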
*/ 1099 uasm_i_dins(p, ptr, 0, 0, 23); 1100 } 1101 } else { 1102 if (pgd_reg != -1) 1103 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); 1104 else 1105 UASM_i_MFC0(p, ptr, C0_CONTEXT); 1106 1107 UASM_i_MFC0(p, tmp, C0_BADVADDR); 1108 1109 if (c0_scratch_reg >= 0) 1110 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); 1111 else 1112 UASM_i_SW(p, scratch, scratchpad_offset(0), 0); 1113 1114 if (pgd_reg == -1) 1115 /* Clear lower 23 bits of context. */ 1116 uasm_i_dins(p, ptr, 0, 0, 23); 1117 1118 uasm_il_bltz(p, r, tmp, label_vmalloc); 1119 } 1120 1121 if (pgd_reg == -1) { 1122 vmalloc_branch_delay_filled = 1; 1123 /* 1 0 1 0 1 << 6 xkphys cached */ 1124 uasm_i_ori(p, ptr, ptr, 0x540); 1125 uasm_i_drotr(p, ptr, ptr, 11); 1126 } 1127 1128 #ifdef __PAGETABLE_PMD_FOLDED 1129 #define LOC_PTEP scratch 1130 #else 1131 #define LOC_PTEP ptr 1132 #endif 1133 1134 if (!vmalloc_branch_delay_filled) 1135 /* get pgd offset in bytes */ 1136 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); 1137 1138 uasm_l_vmalloc_done(l, *p); 1139 1140 /* 1141 * tmp ptr 1142 * fall-through case = badvaddr *pgd_current 1143 * vmalloc case = badvaddr swapper_pg_dir 1144 */ 1145 1146 if (vmalloc_branch_delay_filled) 1147 /* get pgd offset in bytes */ 1148 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); 1149 1150 #ifdef __PAGETABLE_PMD_FOLDED 1151 GET_CONTEXT(p, tmp); /* get context reg */ 1152 #endif 1153 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3); 1154 1155 if (use_lwx_insns()) { 1156 UASM_i_LWX(p, LOC_PTEP, scratch, ptr); 1157 } else { 1158 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */ 1159 uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */ 1160 } 1161 1162 #ifndef __PAGETABLE_PMD_FOLDED 1163 /* get pmd offset in bytes */ 1164 uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3); 1165 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3); 1166 GET_CONTEXT(p, tmp); /* get context reg */ 1167 1168 if (use_lwx_insns()) { 1169 UASM_i_LWX(p, scratch, scratch, ptr); 1170 } else { 1171 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */ 1172 UASM_i_LW(p, scratch, 0, ptr); 1173 } 1174 #endif 1175 /* Adjust the context during the load latency. */ 1176 build_adjust_context(p, tmp); 1177 1178 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1179 uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update); 1180 /* 1181 * The in the LWX case we don't want to do the load in the 1182 * delay slot. It cannot issue in the same cycle and may be 1183 * speculative and unneeded. 
1184 */ 1185 if (use_lwx_insns()) 1186 uasm_i_nop(p); 1187 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 1188 1189 1190 /* build_update_entries */ 1191 if (use_lwx_insns()) { 1192 even = ptr; 1193 odd = tmp; 1194 UASM_i_LWX(p, even, scratch, tmp); 1195 UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t)); 1196 UASM_i_LWX(p, odd, scratch, tmp); 1197 } else { 1198 UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */ 1199 even = tmp; 1200 odd = ptr; 1201 UASM_i_LW(p, even, 0, ptr); /* get even pte */ 1202 UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */ 1203 } 1204 if (cpu_has_rixi) { 1205 uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL)); 1206 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ 1207 uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL)); 1208 } else { 1209 uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL)); 1210 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ 1211 uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL)); 1212 } 1213 UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ 1214 1215 if (c0_scratch_reg >= 0) { 1216 UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg); 1217 build_tlb_write_entry(p, l, r, tlb_random); 1218 uasm_l_leave(l, *p); 1219 rv.restore_scratch = 1; 1220 } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) { 1221 build_tlb_write_entry(p, l, r, tlb_random); 1222 uasm_l_leave(l, *p); 1223 UASM_i_LW(p, scratch, scratchpad_offset(0), 0); 1224 } else { 1225 UASM_i_LW(p, scratch, scratchpad_offset(0), 0); 1226 build_tlb_write_entry(p, l, r, tlb_random); 1227 uasm_l_leave(l, *p); 1228 rv.restore_scratch = 1; 1229 } 1230 1231 uasm_i_eret(p); /* return from trap */ 1232 1233 return rv; 1234 } 1235 1236 /* 1237 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception 1238 * because EXL == 0. If we wrap, we can also use the 32 instruction 1239 * slots before the XTLB refill exception handler which belong to the 1240 * unused TLB refill exception. 
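 * The overflow checks below therefore allow the generated handler to
 * grow to almost 2 * MIPS64_REFILL_INSNS instructions, split across
 * the two vectors.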
1241 */ 1242 #define MIPS64_REFILL_INSNS 32 1243 1244 static void build_r4000_tlb_refill_handler(void) 1245 { 1246 u32 *p = tlb_handler; 1247 struct uasm_label *l = labels; 1248 struct uasm_reloc *r = relocs; 1249 u32 *f; 1250 unsigned int final_len; 1251 struct mips_huge_tlb_info htlb_info __maybe_unused; 1252 enum vmalloc64_mode vmalloc_mode __maybe_unused; 1253 1254 memset(tlb_handler, 0, sizeof(tlb_handler)); 1255 memset(labels, 0, sizeof(labels)); 1256 memset(relocs, 0, sizeof(relocs)); 1257 memset(final_handler, 0, sizeof(final_handler)); 1258 1259 if ((scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { 1260 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, 1261 scratch_reg); 1262 vmalloc_mode = refill_scratch; 1263 } else { 1264 htlb_info.huge_pte = K0; 1265 htlb_info.restore_scratch = 0; 1266 vmalloc_mode = refill_noscratch; 1267 /* 1268 * create the plain linear handler 1269 */ 1270 if (bcm1250_m3_war()) { 1271 unsigned int segbits = 44; 1272 1273 uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1274 uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 1275 uasm_i_xor(&p, K0, K0, K1); 1276 uasm_i_dsrl_safe(&p, K1, K0, 62); 1277 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 1278 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 1279 uasm_i_or(&p, K0, K0, K1); 1280 uasm_il_bnez(&p, &r, K0, label_leave); 1281 /* No need for uasm_i_nop */ 1282 } 1283 1284 #ifdef CONFIG_64BIT 1285 build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ 1286 #else 1287 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ 1288 #endif 1289 1290 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1291 build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); 1292 #endif 1293 1294 build_get_ptep(&p, K0, K1); 1295 build_update_entries(&p, K0, K1); 1296 build_tlb_write_entry(&p, &l, &r, tlb_random); 1297 uasm_l_leave(&l, p); 1298 uasm_i_eret(&p); /* return from trap */ 1299 } 1300 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1301 uasm_l_tlb_huge_update(&l, p); 1302 build_huge_update_entries(&p, htlb_info.huge_pte, K1); 1303 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, 1304 htlb_info.restore_scratch); 1305 #endif 1306 1307 #ifdef CONFIG_64BIT 1308 build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); 1309 #endif 1310 1311 /* 1312 * Overflow check: For the 64bit handler, we need at least one 1313 * free instruction slot for the wrap-around branch. In worst 1314 * case, if the intended insertion point is a delay slot, we 1315 * need three, with the second nop'ed and the third being 1316 * unused. 1317 */ 1318 switch (boot_cpu_type()) { 1319 default: 1320 if (sizeof(long) == 4) { 1321 case CPU_LOONGSON2: 1322 /* Loongson2 ebase is different than r4k, we have more space */ 1323 if ((p - tlb_handler) > 64) 1324 panic("TLB refill handler space exceeded"); 1325 /* 1326 * Now fold the handler in the TLB refill handler space. 1327 */ 1328 f = final_handler; 1329 /* Simplest case, just copy the handler. */ 1330 uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1331 final_len = p - tlb_handler; 1332 break; 1333 } else { 1334 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) 1335 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) 1336 && uasm_insn_has_bdelay(relocs, 1337 tlb_handler + MIPS64_REFILL_INSNS - 3))) 1338 panic("TLB refill handler space exceeded"); 1339 /* 1340 * Now fold the handler in the TLB refill handler space. 1341 */ 1342 f = final_handler + MIPS64_REFILL_INSNS; 1343 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { 1344 /* Just copy the handler. 
*/ 1345 uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1346 final_len = p - tlb_handler; 1347 } else { 1348 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1349 const enum label_id ls = label_tlb_huge_update; 1350 #else 1351 const enum label_id ls = label_vmalloc; 1352 #endif 1353 u32 *split; 1354 int ov = 0; 1355 int i; 1356 1357 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) 1358 ; 1359 BUG_ON(i == ARRAY_SIZE(labels)); 1360 split = labels[i].addr; 1361 1362 /* 1363 * See if we have overflown one way or the other. 1364 */ 1365 if (split > tlb_handler + MIPS64_REFILL_INSNS || 1366 split < p - MIPS64_REFILL_INSNS) 1367 ov = 1; 1368 1369 if (ov) { 1370 /* 1371 * Split two instructions before the end. One 1372 * for the branch and one for the instruction 1373 * in the delay slot. 1374 */ 1375 split = tlb_handler + MIPS64_REFILL_INSNS - 2; 1376 1377 /* 1378 * If the branch would fall in a delay slot, 1379 * we must back up an additional instruction 1380 * so that it is no longer in a delay slot. 1381 */ 1382 if (uasm_insn_has_bdelay(relocs, split - 1)) 1383 split--; 1384 } 1385 /* Copy first part of the handler. */ 1386 uasm_copy_handler(relocs, labels, tlb_handler, split, f); 1387 f += split - tlb_handler; 1388 1389 if (ov) { 1390 /* Insert branch. */ 1391 uasm_l_split(&l, final_handler); 1392 uasm_il_b(&f, &r, label_split); 1393 if (uasm_insn_has_bdelay(relocs, split)) 1394 uasm_i_nop(&f); 1395 else { 1396 uasm_copy_handler(relocs, labels, 1397 split, split + 1, f); 1398 uasm_move_labels(labels, f, f + 1, -1); 1399 f++; 1400 split++; 1401 } 1402 } 1403 1404 /* Copy the rest of the handler. */ 1405 uasm_copy_handler(relocs, labels, split, p, final_handler); 1406 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + 1407 (p - split); 1408 } 1409 } 1410 break; 1411 } 1412 1413 uasm_resolve_relocs(relocs, labels); 1414 pr_debug("Wrote TLB refill handler (%u instructions).\n", 1415 final_len); 1416 1417 memcpy((void *)ebase, final_handler, 0x100); 1418 1419 dump_handler("r4000_tlb_refill", (u32 *)ebase, 64); 1420 } 1421 1422 extern u32 handle_tlbl[], handle_tlbl_end[]; 1423 extern u32 handle_tlbs[], handle_tlbs_end[]; 1424 extern u32 handle_tlbm[], handle_tlbm_end[]; 1425 extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[]; 1426 1427 static void build_setup_pgd(void) 1428 { 1429 const int a0 = 4; 1430 const int __maybe_unused a1 = 5; 1431 const int __maybe_unused a2 = 6; 1432 u32 *p = tlbmiss_handler_setup_pgd; 1433 const int tlbmiss_handler_setup_pgd_size = 1434 tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd; 1435 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 1436 long pgdc = (long)pgd_current; 1437 #endif 1438 1439 memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size * 1440 sizeof(tlbmiss_handler_setup_pgd[0])); 1441 memset(labels, 0, sizeof(labels)); 1442 memset(relocs, 0, sizeof(relocs)); 1443 pgd_reg = allocate_kscratch(); 1444 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT 1445 if (pgd_reg == -1) { 1446 struct uasm_label *l = labels; 1447 struct uasm_reloc *r = relocs; 1448 1449 /* PGD << 11 in c0_Context */ 1450 /* 1451 * If it is a ckseg0 address, convert to a physical 1452 * address. Shifting right by 29 and adding 4 will 1453 * result in zero for these addresses. 
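 *
 * For example, the sign-extended ckseg0 base 0xffffffff80000000 is
 * -2^31; an arithmetic shift right by 29 gives -4 and adding 4 gives
 * 0, while any address outside ckseg0 leaves a non-zero value and
 * takes the branch, skipping the conversion.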
1454 * 1455 */ 1456 UASM_i_SRA(&p, a1, a0, 29); 1457 UASM_i_ADDIU(&p, a1, a1, 4); 1458 uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1); 1459 uasm_i_nop(&p); 1460 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); 1461 uasm_l_tlbl_goaround1(&l, p); 1462 UASM_i_SLL(&p, a0, a0, 11); 1463 uasm_i_jr(&p, 31); 1464 UASM_i_MTC0(&p, a0, C0_CONTEXT); 1465 } else { 1466 /* PGD in c0_KScratch */ 1467 uasm_i_jr(&p, 31); 1468 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); 1469 } 1470 #else 1471 #ifdef CONFIG_SMP 1472 /* Save PGD to pgd_current[smp_processor_id()] */ 1473 UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG); 1474 UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT); 1475 UASM_i_LA_mostly(&p, a2, pgdc); 1476 UASM_i_ADDU(&p, a2, a2, a1); 1477 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); 1478 #else 1479 UASM_i_LA_mostly(&p, a2, pgdc); 1480 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); 1481 #endif /* SMP */ 1482 uasm_i_jr(&p, 31); 1483 1484 /* if pgd_reg is allocated, save PGD also to scratch register */ 1485 if (pgd_reg != -1) 1486 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); 1487 else 1488 uasm_i_nop(&p); 1489 #endif 1490 if (p >= tlbmiss_handler_setup_pgd_end) 1491 panic("tlbmiss_handler_setup_pgd space exceeded"); 1492 1493 uasm_resolve_relocs(relocs, labels); 1494 pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", 1495 (unsigned int)(p - tlbmiss_handler_setup_pgd)); 1496 1497 dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd, 1498 tlbmiss_handler_setup_pgd_size); 1499 } 1500 1501 static void 1502 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) 1503 { 1504 #ifdef CONFIG_SMP 1505 # ifdef CONFIG_64BIT_PHYS_ADDR 1506 if (cpu_has_64bits) 1507 uasm_i_lld(p, pte, 0, ptr); 1508 else 1509 # endif 1510 UASM_i_LL(p, pte, 0, ptr); 1511 #else 1512 # ifdef CONFIG_64BIT_PHYS_ADDR 1513 if (cpu_has_64bits) 1514 uasm_i_ld(p, pte, 0, ptr); 1515 else 1516 # endif 1517 UASM_i_LW(p, pte, 0, ptr); 1518 #endif 1519 } 1520 1521 static void 1522 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, 1523 unsigned int mode) 1524 { 1525 #ifdef CONFIG_64BIT_PHYS_ADDR 1526 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); 1527 #endif 1528 1529 uasm_i_ori(p, pte, pte, mode); 1530 #ifdef CONFIG_SMP 1531 # ifdef CONFIG_64BIT_PHYS_ADDR 1532 if (cpu_has_64bits) 1533 uasm_i_scd(p, pte, 0, ptr); 1534 else 1535 # endif 1536 UASM_i_SC(p, pte, 0, ptr); 1537 1538 if (r10000_llsc_war()) 1539 uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); 1540 else 1541 uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1542 1543 # ifdef CONFIG_64BIT_PHYS_ADDR 1544 if (!cpu_has_64bits) { 1545 /* no uasm_i_nop needed */ 1546 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); 1547 uasm_i_ori(p, pte, pte, hwmode); 1548 uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); 1549 uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1550 /* no uasm_i_nop needed */ 1551 uasm_i_lw(p, pte, 0, ptr); 1552 } else 1553 uasm_i_nop(p); 1554 # else 1555 uasm_i_nop(p); 1556 # endif 1557 #else 1558 # ifdef CONFIG_64BIT_PHYS_ADDR 1559 if (cpu_has_64bits) 1560 uasm_i_sd(p, pte, 0, ptr); 1561 else 1562 # endif 1563 UASM_i_SW(p, pte, 0, ptr); 1564 1565 # ifdef CONFIG_64BIT_PHYS_ADDR 1566 if (!cpu_has_64bits) { 1567 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); 1568 uasm_i_ori(p, pte, pte, hwmode); 1569 uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr); 1570 uasm_i_lw(p, pte, 0, ptr); 1571 } 1572 # endif 1573 #endif 1574 } 1575 1576 /* 1577 * Check if PTE is present, if not then jump to LABEL. 
PTR points to 1578 * the page table where this PTE is located, PTE will be re-loaded 1579 * with it's original value. 1580 */ 1581 static void 1582 build_pte_present(u32 **p, struct uasm_reloc **r, 1583 int pte, int ptr, int scratch, enum label_id lid) 1584 { 1585 int t = scratch >= 0 ? scratch : pte; 1586 1587 if (cpu_has_rixi) { 1588 if (use_bbit_insns()) { 1589 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); 1590 uasm_i_nop(p); 1591 } else { 1592 uasm_i_andi(p, t, pte, _PAGE_PRESENT); 1593 uasm_il_beqz(p, r, t, lid); 1594 if (pte == t) 1595 /* You lose the SMP race :-(*/ 1596 iPTE_LW(p, pte, ptr); 1597 } 1598 } else { 1599 uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ); 1600 uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ); 1601 uasm_il_bnez(p, r, t, lid); 1602 if (pte == t) 1603 /* You lose the SMP race :-(*/ 1604 iPTE_LW(p, pte, ptr); 1605 } 1606 } 1607 1608 /* Make PTE valid, store result in PTR. */ 1609 static void 1610 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, 1611 unsigned int ptr) 1612 { 1613 unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED; 1614 1615 iPTE_SW(p, r, pte, ptr, mode); 1616 } 1617 1618 /* 1619 * Check if PTE can be written to, if not branch to LABEL. Regardless 1620 * restore PTE with value from PTR when done. 1621 */ 1622 static void 1623 build_pte_writable(u32 **p, struct uasm_reloc **r, 1624 unsigned int pte, unsigned int ptr, int scratch, 1625 enum label_id lid) 1626 { 1627 int t = scratch >= 0 ? scratch : pte; 1628 1629 uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE); 1630 uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE); 1631 uasm_il_bnez(p, r, t, lid); 1632 if (pte == t) 1633 /* You lose the SMP race :-(*/ 1634 iPTE_LW(p, pte, ptr); 1635 else 1636 uasm_i_nop(p); 1637 } 1638 1639 /* Make PTE writable, update software status bits as well, then store 1640 * at PTR. 1641 */ 1642 static void 1643 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, 1644 unsigned int ptr) 1645 { 1646 unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID 1647 | _PAGE_DIRTY); 1648 1649 iPTE_SW(p, r, pte, ptr, mode); 1650 } 1651 1652 /* 1653 * Check if PTE can be modified, if not branch to LABEL. Regardless 1654 * restore PTE with value from PTR when done. 1655 */ 1656 static void 1657 build_pte_modifiable(u32 **p, struct uasm_reloc **r, 1658 unsigned int pte, unsigned int ptr, int scratch, 1659 enum label_id lid) 1660 { 1661 if (use_bbit_insns()) { 1662 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid); 1663 uasm_i_nop(p); 1664 } else { 1665 int t = scratch >= 0 ? scratch : pte; 1666 uasm_i_andi(p, t, pte, _PAGE_WRITE); 1667 uasm_il_beqz(p, r, t, lid); 1668 if (pte == t) 1669 /* You lose the SMP race :-(*/ 1670 iPTE_LW(p, pte, ptr); 1671 } 1672 } 1673 1674 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 1675 1676 1677 /* 1678 * R3000 style TLB load/store/modify handlers. 1679 */ 1680 1681 /* 1682 * This places the pte into ENTRYLO0 and writes it with tlbwi. 1683 * Then it returns. 1684 */ 1685 static void 1686 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) 1687 { 1688 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1689 uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ 1690 uasm_i_tlbwi(p); 1691 uasm_i_jr(p, tmp); 1692 uasm_i_rfe(p); /* branch delay */ 1693 } 1694 1695 /* 1696 * This places the pte into ENTRYLO0 and writes it with tlbwi 1697 * or tlbwr as appropriate. This is because the index register 1698 * may have the probe fail bit set as a result of a trap on a 1699 * kseg2 access, i.e. without refill. 
Then it returns. 1700 */ 1701 static void 1702 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, 1703 struct uasm_reloc **r, unsigned int pte, 1704 unsigned int tmp) 1705 { 1706 uasm_i_mfc0(p, tmp, C0_INDEX); 1707 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1708 uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ 1709 uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */ 1710 uasm_i_tlbwi(p); /* cp0 delay */ 1711 uasm_i_jr(p, tmp); 1712 uasm_i_rfe(p); /* branch delay */ 1713 uasm_l_r3000_write_probe_fail(l, *p); 1714 uasm_i_tlbwr(p); /* cp0 delay */ 1715 uasm_i_jr(p, tmp); 1716 uasm_i_rfe(p); /* branch delay */ 1717 } 1718 1719 static void 1720 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, 1721 unsigned int ptr) 1722 { 1723 long pgdc = (long)pgd_current; 1724 1725 uasm_i_mfc0(p, pte, C0_BADVADDR); 1726 uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */ 1727 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 1728 uasm_i_srl(p, pte, pte, 22); /* load delay */ 1729 uasm_i_sll(p, pte, pte, 2); 1730 uasm_i_addu(p, ptr, ptr, pte); 1731 uasm_i_mfc0(p, pte, C0_CONTEXT); 1732 uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */ 1733 uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ 1734 uasm_i_addu(p, ptr, ptr, pte); 1735 uasm_i_lw(p, pte, 0, ptr); 1736 uasm_i_tlbp(p); /* load delay */ 1737 } 1738 1739 static void build_r3000_tlb_load_handler(void) 1740 { 1741 u32 *p = handle_tlbl; 1742 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; 1743 struct uasm_label *l = labels; 1744 struct uasm_reloc *r = relocs; 1745 1746 memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0])); 1747 memset(labels, 0, sizeof(labels)); 1748 memset(relocs, 0, sizeof(relocs)); 1749 1750 build_r3000_tlbchange_handler_head(&p, K0, K1); 1751 build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); 1752 uasm_i_nop(&p); /* load delay */ 1753 build_make_valid(&p, &r, K0, K1); 1754 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1755 1756 uasm_l_nopage_tlbl(&l, p); 1757 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 1758 uasm_i_nop(&p); 1759 1760 if (p >= handle_tlbl_end) 1761 panic("TLB load handler fastpath space exceeded"); 1762 1763 uasm_resolve_relocs(relocs, labels); 1764 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", 1765 (unsigned int)(p - handle_tlbl)); 1766 1767 dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size); 1768 } 1769 1770 static void build_r3000_tlb_store_handler(void) 1771 { 1772 u32 *p = handle_tlbs; 1773 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; 1774 struct uasm_label *l = labels; 1775 struct uasm_reloc *r = relocs; 1776 1777 memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0])); 1778 memset(labels, 0, sizeof(labels)); 1779 memset(relocs, 0, sizeof(relocs)); 1780 1781 build_r3000_tlbchange_handler_head(&p, K0, K1); 1782 build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); 1783 uasm_i_nop(&p); /* load delay */ 1784 build_make_write(&p, &r, K0, K1); 1785 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1786 1787 uasm_l_nopage_tlbs(&l, p); 1788 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1789 uasm_i_nop(&p); 1790 1791 if (p >= handle_tlbs_end) 1792 panic("TLB store handler fastpath space exceeded"); 1793 1794 uasm_resolve_relocs(relocs, labels); 1795 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", 1796 (unsigned int)(p - handle_tlbs)); 1797 1798 dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size); 1799 } 1800 1801 static void 
build_r3000_tlb_modify_handler(void) 1802 { 1803 u32 *p = handle_tlbm; 1804 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm; 1805 struct uasm_label *l = labels; 1806 struct uasm_reloc *r = relocs; 1807 1808 memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0])); 1809 memset(labels, 0, sizeof(labels)); 1810 memset(relocs, 0, sizeof(relocs)); 1811 1812 build_r3000_tlbchange_handler_head(&p, K0, K1); 1813 build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); 1814 uasm_i_nop(&p); /* load delay */ 1815 build_make_write(&p, &r, K0, K1); 1816 build_r3000_pte_reload_tlbwi(&p, K0, K1); 1817 1818 uasm_l_nopage_tlbm(&l, p); 1819 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1820 uasm_i_nop(&p); 1821 1822 if (p >= handle_tlbm_end) 1823 panic("TLB modify handler fastpath space exceeded"); 1824 1825 uasm_resolve_relocs(relocs, labels); 1826 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", 1827 (unsigned int)(p - handle_tlbm)); 1828 1829 dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size); 1830 } 1831 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ 1832 1833 /* 1834 * R4000 style TLB load/store/modify handlers. 1835 */ 1836 static struct work_registers 1837 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, 1838 struct uasm_reloc **r) 1839 { 1840 struct work_registers wr = build_get_work_registers(p); 1841 1842 #ifdef CONFIG_64BIT 1843 build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */ 1844 #else 1845 build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */ 1846 #endif 1847 1848 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1849 /* 1850 * For huge tlb entries, pmd doesn't contain an address but 1851 * instead contains the tlb pte. Check the PAGE_HUGE bit and 1852 * see if we need to jump to huge tlb processing. 
1853 */ 1854 build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update); 1855 #endif 1856 1857 UASM_i_MFC0(p, wr.r1, C0_BADVADDR); 1858 UASM_i_LW(p, wr.r2, 0, wr.r2); 1859 UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); 1860 uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); 1861 UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); 1862 1863 #ifdef CONFIG_SMP 1864 uasm_l_smp_pgtable_change(l, *p); 1865 #endif 1866 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ 1867 if (!m4kc_tlbp_war()) 1868 build_tlb_probe_entry(p); 1869 return wr; 1870 } 1871 1872 static void 1873 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, 1874 struct uasm_reloc **r, unsigned int tmp, 1875 unsigned int ptr) 1876 { 1877 uasm_i_ori(p, ptr, ptr, sizeof(pte_t)); 1878 uasm_i_xori(p, ptr, ptr, sizeof(pte_t)); 1879 build_update_entries(p, tmp, ptr); 1880 build_tlb_write_entry(p, l, r, tlb_indexed); 1881 uasm_l_leave(l, *p); 1882 build_restore_work_registers(p); 1883 uasm_i_eret(p); /* return from trap */ 1884 1885 #ifdef CONFIG_64BIT 1886 build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill); 1887 #endif 1888 } 1889 1890 static void build_r4000_tlb_load_handler(void) 1891 { 1892 u32 *p = handle_tlbl; 1893 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; 1894 struct uasm_label *l = labels; 1895 struct uasm_reloc *r = relocs; 1896 struct work_registers wr; 1897 1898 memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0])); 1899 memset(labels, 0, sizeof(labels)); 1900 memset(relocs, 0, sizeof(relocs)); 1901 1902 if (bcm1250_m3_war()) { 1903 unsigned int segbits = 44; 1904 1905 uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1906 uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 1907 uasm_i_xor(&p, K0, K0, K1); 1908 uasm_i_dsrl_safe(&p, K1, K0, 62); 1909 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 1910 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 1911 uasm_i_or(&p, K0, K0, K1); 1912 uasm_il_bnez(&p, &r, K0, label_leave); 1913 /* No need for uasm_i_nop */ 1914 } 1915 1916 wr = build_r4000_tlbchange_handler_head(&p, &l, &r); 1917 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); 1918 if (m4kc_tlbp_war()) 1919 build_tlb_probe_entry(&p); 1920 1921 if (cpu_has_rixi) { 1922 /* 1923 * If the page is not _PAGE_VALID, RI or XI could not 1924 * have triggered it. Skip the expensive test.. 1925 */ 1926 if (use_bbit_insns()) { 1927 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), 1928 label_tlbl_goaround1); 1929 } else { 1930 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); 1931 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1); 1932 } 1933 uasm_i_nop(&p); 1934 1935 uasm_i_tlbr(&p); 1936 1937 switch (current_cpu_type()) { 1938 default: 1939 if (cpu_has_mips_r2) { 1940 uasm_i_ehb(&p); 1941 1942 case CPU_CAVIUM_OCTEON: 1943 case CPU_CAVIUM_OCTEON_PLUS: 1944 case CPU_CAVIUM_OCTEON2: 1945 break; 1946 } 1947 } 1948 1949 /* Examine entrylo 0 or 1 based on ptr. */ 1950 if (use_bbit_insns()) { 1951 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); 1952 } else { 1953 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); 1954 uasm_i_beqz(&p, wr.r3, 8); 1955 } 1956 /* load it in the delay slot*/ 1957 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); 1958 /* load it if ptr is odd */ 1959 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); 1960 /* 1961 * If the entryLo (now in wr.r3) is valid (bit 1), RI or 1962 * XI must have triggered it. 
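		 * (Bit 1 is the V bit in the hardware EntryLo register
		 * layout, hence the "andi ..., 2" test in the non-bbit
		 * path below.)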
static void build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (cpu_has_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround1);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);

		switch (current_cpu_type()) {
		default:
			if (cpu_has_mips_r2) {
				uasm_i_ehb(&p);	/* clear the tlbr->mfc0 hazard */

		case CPU_CAVIUM_OCTEON:
		case CPU_CAVIUM_OCTEON_PLUS:
		case CPU_CAVIUM_OCTEON2:
				break;		/* Octeon does not need the ehb */
			}
		}

		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
			uasm_i_nop(&p);
			uasm_l_tlbl_goaround1(&l, p);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
			uasm_i_nop(&p);
		}
		uasm_l_tlbl_goaround1(&l, p);
	}
	build_make_valid(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	build_tlb_probe_entry(&p);

	if (cpu_has_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);

		switch (current_cpu_type()) {
		default:
			if (cpu_has_mips_r2) {
				uasm_i_ehb(&p);	/* clear the tlbr->mfc0 hazard */

		case CPU_CAVIUM_OCTEON:
		case CPU_CAVIUM_OCTEON_PLUS:
		case CPU_CAVIUM_OCTEON2:
				break;		/* Octeon does not need the ehb */
			}
		}

		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
		if (PM_DEFAULT_MASK == 0)
			uasm_i_nop(&p);
		/*
		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
		 * it is restored in build_huge_tlb_write_entry.
		 */
		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);

		uasm_l_tlbl_goaround2(&l, p);
	}
	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

	uasm_l_nopage_tlbl(&l, p);
	build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
	if ((unsigned long)tlb_do_page_fault_0 & 1) {
		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
		uasm_i_jr(&p, K0);
	} else
#endif
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= handle_tlbl_end)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
}
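
/*
 * Illustrative sketch (not built): the "examine entrylo 0 or 1" step
 * used twice above.  The branch is taken when ptr addresses the even
 * PTE of the pair and skips the ENTRYLO1 read; the ENTRYLO0 read sits
 * in the branch delay slot, so it always executes and is simply
 * overwritten on the odd path.  A hypothetical helper consolidating it:
 */
#if 0
static void example_read_matching_entrylo(u32 **p, struct work_registers wr)
{
	if (use_bbit_insns()) {
		uasm_i_bbit0(p, wr.r2, ilog2(sizeof(pte_t)), 8);
	} else {
		uasm_i_andi(p, wr.r3, wr.r2, sizeof(pte_t));
		uasm_i_beqz(p, wr.r3, 8);
	}
	UASM_i_MFC0(p, wr.r3, C0_ENTRYLO0);	/* even PTE, delay slot */
	UASM_i_MFC0(p, wr.r3, C0_ENTRYLO1);	/* odd PTE, skipped when even */
}
#endif
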
static void build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

	uasm_l_nopage_tlbs(&l, p);
	build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
	if ((unsigned long)tlb_do_page_fault_1 & 1) {
		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
		uasm_i_jr(&p, K0);
	} else
#endif
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= handle_tlbs_end)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
}

static void build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

	uasm_l_nopage_tlbm(&l, p);
	build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
	if ((unsigned long)tlb_do_page_fault_1 & 1) {
		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
		uasm_i_jr(&p, K0);
	} else
#endif
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= handle_tlbm_end)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
}
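
/*
 * Illustrative sketch (not built): the three nopage tails above repeat
 * the same "jump to the C slowpath" sequence.  A plain j can only reach
 * a target inside the current 256MB segment, hence the & 0x0fffffff;
 * the microMIPS case (odd handler address) loads the full address and
 * uses jr instead.  A hypothetical helper consolidating the pattern:
 */
#if 0
static void example_emit_jump_to_slowpath(u32 **p, void (*handler)(void))
{
#ifdef CONFIG_CPU_MICROMIPS
	if ((unsigned long)handler & 1) {
		uasm_i_lui(p, K0, uasm_rel_hi((long)handler));
		uasm_i_addiu(p, K0, K0, uasm_rel_lo((long)handler));
		uasm_i_jr(p, K0);
	} else
#endif
	uasm_i_j(p, (unsigned long)handler & 0x0fffffff);
	uasm_i_nop(p);	/* delay slot */
}
#endif
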
static void flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
				 (unsigned long)handle_tlbl_end);
	local_flush_icache_range((unsigned long)handle_tlbs,
				 (unsigned long)handle_tlbs_end);
	local_flush_icache_range((unsigned long)handle_tlbm,
				 (unsigned long)handle_tlbm_end);
	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
				 (unsigned long)tlbmiss_handler_setup_pgd_end);
}

void build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU; multi-node systems
	 * may have local storage for it.  The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	output_pgtable_bits_defines();

#ifdef CONFIG_64BIT
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		if (cpu_has_local_ebase)
			build_r3000_tlb_refill_handler();
		if (!run_once) {
			if (!cpu_has_local_ebase)
				build_r3000_tlb_refill_handler();
			build_setup_pgd();
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			flush_tlb_handlers();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		if (!run_once) {
			scratch_reg = allocate_kscratch();
			build_setup_pgd();
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			if (!cpu_has_local_ebase)
				build_r4000_tlb_refill_handler();
			flush_tlb_handlers();
			run_once++;
		}
		if (cpu_has_local_ebase)
			build_r4000_tlb_refill_handler();
	}
}
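
/*
 * Illustrative sketch (not built): the expected call pattern.  A
 * hypothetical per-CPU bringup path calls build_tlb_refill_handler()
 * on every CPU; only the first call synthesizes the shared fastpaths
 * and the pgd setup stub, while later calls at most regenerate a
 * CPU-local refill handler when cpu_has_local_ebase.
 */
#if 0
static void example_cpu_bringup(void)
{
	/* ...per-CPU cache and TLB setup would go here... */
	build_tlb_refill_handler();
}
#endif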