/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 * Copyright (C) 2011 MIPS Technologies, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me a away haha
 * they're coming to take me a away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/cache.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpu-type.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/uasm.h>
#include <asm/setup.h>
#include <asm/tlbex.h>

static int mips_xpa_disabled;

static int __init xpa_disable(char *s)
{
	mips_xpa_disabled = 1;

	return 1;
}

__setup("noxpa", xpa_disable);

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

struct work_registers {
	int r1;
	int r2;
	int r3;
};

struct tlb_reg_save {
	unsigned long a;
	unsigned long b;
} ____cacheline_aligned_in_smp;

static struct tlb_reg_save handler_reg_save[NR_CPUS];

static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

static int use_bbit_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		return 1;
	default:
		return 0;
	}
}

static int use_lwx_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		return 1;
	default:
		return 0;
	}
}

#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
static bool scratchpad_available(void)
{
	return true;
}

static int scratchpad_offset(int i)
{
	/*
	 * CVMSEG starts at address -32768 and extends for
	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
	 */
	i += 1; /* Kernel use starts at the top and works down. */
	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
}
#else
static bool scratchpad_available(void)
{
	return false;
}

static int scratchpad_offset(int i)
{
	BUG();
	/* Really unreachable, but evidently some GCC versions want this. */
	return 0;
}
#endif
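/*
 * Illustrative sketch, assuming a hypothetical
 * CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE of 2: CVMSEG then covers the 256 bytes
 * from -32768 to -32513, and kernel slots are handed out from the top
 * down, eight bytes apiece:
 *
 *	scratchpad_offset(0) == 2 * 128 - 8 * 1 - 32768 == -32520
 *	scratchpad_offset(1) == 2 * 128 - 8 * 2 - 32768 == -32528
 *
 * The result is later used as a signed 16-bit displacement from $0,
 * e.g. UASM_i_SW(p, scratch, scratchpad_offset(0), 0).
 */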
/*
 * Found by experiment: At least some revisions of the 4kc throw under
 * some circumstances a machine check exception, triggered by invalid
 * values in the index register.  Delaying the tlbp instruction until
 * after the next branch, plus adding an additional nop in front of
 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
 * why; it's not an issue caused by the core RTL.
 *
 */
static int m4kc_tlbp_war(void)
{
	return current_cpu_type() == CPU_4KC;
}

/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard_0,
	label_split = label_tlbw_hazard_0 + 8,
	label_tlbl_goaround1,
	label_tlbl_goaround2,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
	label_large_segbits_fault,
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
/* _tlbw_hazard_x is handled differently. */
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
UASM_L_LA(_tlb_huge_update)
#endif

static int hazard_instance;

static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
{
	switch (instance) {
	case 0 ... 7:
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
		return;
	default:
		BUG();
	}
}

static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
{
	switch (instance) {
	case 0 ... 7:
		uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
		break;
	default:
		BUG();
	}
}
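/*
 * Sketch of how this pair is used (see the R4000/R4400 cases in
 * build_tlb_write_entry() below); the instance number simply selects one
 * of the eight label_tlbw_hazard_N labels reserved in the enum above:
 *
 *	uasm_bgezl_hazard(p, r, instance);	# bgezl $0, 1f
 *	tlbw(p);				#  tlbwi/tlbwr in the delay slot
 *	uasm_bgezl_label(l, p, instance);	# 1:
 *
 * bgezl with $0 is always taken, so the TLB write executes in the
 * branch-likely delay slot; as the comment in build_tlb_write_entry()
 * notes, the branch soaks up the mtc0 hazard slot and saves the two nops
 * that would otherwise be needed after the tlbw.
 */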
/*
 * pgtable bits are assigned dynamically depending on processor feature
 * and statically based on kernel configuration.  This spits out the actual
 * values the kernel is using.  Required to make sense from disassembled
 * TLB exception handlers.
 */
static void output_pgtable_bits_defines(void)
{
#define pr_define(fmt, ...)					\
	pr_debug("#define " fmt, ##__VA_ARGS__)

	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");

	pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
	pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
	pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
	pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
	pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
#endif
#ifdef _PAGE_NO_EXEC_SHIFT
	if (cpu_has_rixi)
		pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
#endif
	pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
	pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
	pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
	pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
	pr_debug("\n");
}

static inline void dump_handler(const char *symbol, const void *start, const void *end)
{
	unsigned int count = (end - start) / sizeof(u32);
	const u32 *handler = start;
	int i;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_PWBASE	5, 5
#define C0_PWFIELD	5, 6
#define C0_PWSIZE	5, 7
#define C0_PWCTL	6, 6
#define C0_BADVADDR	8, 0
#define C0_PGD		9, 7
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif

/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
310 */ 311 static u32 tlb_handler[128]; 312 313 /* simply assume worst case size for labels and relocs */ 314 static struct uasm_label labels[128]; 315 static struct uasm_reloc relocs[128]; 316 317 static int check_for_high_segbits; 318 static bool fill_includes_sw_bits; 319 320 static unsigned int kscratch_used_mask; 321 322 static inline int __maybe_unused c0_kscratch(void) 323 { 324 switch (current_cpu_type()) { 325 case CPU_XLP: 326 case CPU_XLR: 327 return 22; 328 default: 329 return 31; 330 } 331 } 332 333 static int allocate_kscratch(void) 334 { 335 int r; 336 unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask; 337 338 r = ffs(a); 339 340 if (r == 0) 341 return -1; 342 343 r--; /* make it zero based */ 344 345 kscratch_used_mask |= (1 << r); 346 347 return r; 348 } 349 350 static int scratch_reg; 351 int pgd_reg; 352 EXPORT_SYMBOL_GPL(pgd_reg); 353 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; 354 355 static struct work_registers build_get_work_registers(u32 **p) 356 { 357 struct work_registers r; 358 359 if (scratch_reg >= 0) { 360 /* Save in CPU local C0_KScratch? */ 361 UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg); 362 r.r1 = K0; 363 r.r2 = K1; 364 r.r3 = 1; 365 return r; 366 } 367 368 if (num_possible_cpus() > 1) { 369 /* Get smp_processor_id */ 370 UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG); 371 UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT); 372 373 /* handler_reg_save index in K0 */ 374 UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); 375 376 UASM_i_LA(p, K1, (long)&handler_reg_save); 377 UASM_i_ADDU(p, K0, K0, K1); 378 } else { 379 UASM_i_LA(p, K0, (long)&handler_reg_save); 380 } 381 /* K0 now points to save area, save $1 and $2 */ 382 UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); 383 UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0); 384 385 r.r1 = K1; 386 r.r2 = 1; 387 r.r3 = 2; 388 return r; 389 } 390 391 static void build_restore_work_registers(u32 **p) 392 { 393 if (scratch_reg >= 0) { 394 uasm_i_ehb(p); 395 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 396 return; 397 } 398 /* K0 already points to save area, restore $1 and $2 */ 399 UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); 400 UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); 401 } 402 403 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 404 405 /* 406 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current, 407 * we cannot do r3000 under these circumstances. 408 * 409 * The R3000 TLB handler is simple. 
410 */ 411 static void build_r3000_tlb_refill_handler(void) 412 { 413 long pgdc = (long)pgd_current; 414 u32 *p; 415 416 memset(tlb_handler, 0, sizeof(tlb_handler)); 417 p = tlb_handler; 418 419 uasm_i_mfc0(&p, K0, C0_BADVADDR); 420 uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ 421 uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); 422 uasm_i_srl(&p, K0, K0, 22); /* load delay */ 423 uasm_i_sll(&p, K0, K0, 2); 424 uasm_i_addu(&p, K1, K1, K0); 425 uasm_i_mfc0(&p, K0, C0_CONTEXT); 426 uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ 427 uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ 428 uasm_i_addu(&p, K1, K1, K0); 429 uasm_i_lw(&p, K0, 0, K1); 430 uasm_i_nop(&p); /* load delay */ 431 uasm_i_mtc0(&p, K0, C0_ENTRYLO0); 432 uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ 433 uasm_i_tlbwr(&p); /* cp0 delay */ 434 uasm_i_jr(&p, K1); 435 uasm_i_rfe(&p); /* branch delay */ 436 437 if (p > tlb_handler + 32) 438 panic("TLB refill handler space exceeded"); 439 440 pr_debug("Wrote TLB refill handler (%u instructions).\n", 441 (unsigned int)(p - tlb_handler)); 442 443 memcpy((void *)ebase, tlb_handler, 0x80); 444 local_flush_icache_range(ebase, ebase + 0x80); 445 dump_handler("r3000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x80)); 446 } 447 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ 448 449 /* 450 * The R4000 TLB handler is much more complicated. We have two 451 * consecutive handler areas with 32 instructions space each. 452 * Since they aren't used at the same time, we can overflow in the 453 * other one.To keep things simple, we first assume linear space, 454 * then we relocate it to the final handler layout as needed. 455 */ 456 static u32 final_handler[64]; 457 458 /* 459 * Hazards 460 * 461 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0: 462 * 2. A timing hazard exists for the TLBP instruction. 463 * 464 * stalling_instruction 465 * TLBP 466 * 467 * The JTLB is being read for the TLBP throughout the stall generated by the 468 * previous instruction. This is not really correct as the stalling instruction 469 * can modify the address used to access the JTLB. The failure symptom is that 470 * the TLBP instruction will use an address created for the stalling instruction 471 * and not the address held in C0_ENHI and thus report the wrong results. 472 * 473 * The software work-around is to not allow the instruction preceding the TLBP 474 * to stall - make it an NOP or some other instruction guaranteed not to stall. 475 * 476 * Errata 2 will not be fixed. This errata is also on the R5000. 477 * 478 * As if we MIPS hackers wouldn't know how to nop pipelines happy ... 479 */ 480 static void __maybe_unused build_tlb_probe_entry(u32 **p) 481 { 482 switch (current_cpu_type()) { 483 /* Found by experiment: R4600 v2.0/R4700 needs this, too. 
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}

void build_tlb_write_entry(u32 **p, struct uasm_label **l,
			   struct uasm_reloc **r,
			   enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2_r6) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_bgezl_hazard(p, r, hazard_instance);
		tlbw(p);
		uasm_bgezl_label(l, p, hazard_instance);
		hazard_instance++;
		uasm_i_nop(p);
		break;

	case CPU_R4600:
	case CPU_R4700:
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		break;

	case CPU_R5000:
	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		tlbw(p);
		break;

	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_PR4450:
	case CPU_XLR:
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
	case CPU_BMIPS32:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2EF:
	case CPU_LOONGSON64:
	case CPU_R5500:
		if (m4kc_tlbp_war())
			uasm_i_nop(p);
		fallthrough;
	case CPU_ALCHEMY:
		tlbw(p);
		break;

	case CPU_RM7000:
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		break;

	case CPU_VR4131:
	case CPU_VR4133:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_XBURST:
		tlbw(p);
		uasm_i_nop(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_type());
		break;
	}
}
EXPORT_SYMBOL_GPL(build_tlb_write_entry);

static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
							 unsigned int reg)
{
	if (_PAGE_GLOBAL_SHIFT == 0) {
		/* pte_t is already in EntryLo format */
		return;
	}

	if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
		if (fill_includes_sw_bits) {
			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
		} else {
			UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
			UASM_i_ROTR(p, reg, reg,
				    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		}
	} else {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
	}
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT

static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
				   unsigned int tmp, enum label_id lid,
				   int restore_scratch)
{
	if (restore_scratch) {
		/*
		 * Ensure the MFC0 below observes the value written to the
		 * KScratch register by the prior MTC0.
		 */
		if (scratch_reg >= 0)
			uasm_i_ehb(p);

		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else {
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		}
		if (scratch_reg >= 0)
			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
		else
			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
	} else {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else {
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
		}
	}
}

static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
				       struct uasm_reloc **r,
				       unsigned int tmp,
				       enum tlb_write_entry wmode,
				       int restore_scratch)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
}

/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		  unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	if (use_bbit_insns()) {
		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
	} else {
		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
		uasm_il_bnez(p, r, tmp, lid);
	}
}

static void build_huge_update_entries(u32 **p, unsigned int pte,
				      unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
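	/*
	 * Illustrative numbers, assuming a hypothetical 2 MB huge page and
	 * the usual EntryLo layout (PFN field starting at bit 6, i.e. the
	 * encoded value is PA >> 6): the odd half starts HPAGE_SIZE / 2
	 * bytes above the even half, which in EntryLo units is
	 *
	 *	(HPAGE_SIZE / 2) >> 6 == HPAGE_SIZE >> 7 == 0x4000
	 *
	 * That fits a 16-bit ADDIU immediate, so small_sequence is true
	 * and no lui/ADDU pair is needed.
	 */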
	/* We can clobber tmp.  It isn't used after this. */
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	build_convert_pte_to_entrylo(p, pte);
	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}

static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
				    struct uasm_label **l,
				    unsigned int pte,
				    unsigned int ptr,
				    unsigned int flush)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	if (cpu_has_ftlb && flush) {
		BUG_ON(!cpu_has_tlbinv);

		UASM_i_MFC0(p, ptr, C0_ENTRYHI);
		uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
		build_tlb_write_entry(p, l, r, tlb_indexed);

		uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
		build_huge_update_entries(p, pte, ptr);
		build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);

		return;
	}

	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		      unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif
	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);

	if (check_for_high_segbits) {
		/*
		 * The kernel currently implicitly assumes that the
		 * MIPS SEGBITS parameter for the processor is
		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
		 * allocate virtual addresses outside the maximum
		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
		 * that doesn't prevent user code from accessing the
		 * higher xuseg addresses.  Here, we make sure that
		 * everything but the lower xuseg addresses goes down
		 * the module_alloc/vmalloc path.
		 */
		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, ptr, label_vmalloc);
	} else {
		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

	if (pgd_reg != -1) {
		/* pgd is in pgd_reg */
		if (cpu_has_ldpte)
			UASM_i_MFC0(p, ptr, C0_PWBASE);
		else
			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
	} else {
#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
		/*
		 * &pgd << 11 stored in CONTEXT [23..63].
		 */
		UASM_i_MFC0(p, ptr, C0_CONTEXT);
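		/*
		 * Sketch of the trick used below: tlbmiss_handler_setup_pgd()
		 * stored the pgd pointer pre-shifted left by 11 in CONTEXT
		 * (see build_setup_pgd() further down).  The dins clears the
		 * low 23 bits holding BadVPN2, the ori plants the constant
		 * 0x540 (binary 1 0 1 0 1 shifted left by 6), and the rotate
		 * right by 11 then drops the pgd value back into place while
		 * moving those constant bits into the top of the register,
		 * turning it into an xkphys cached pointer - all without a
		 * scratch register.
		 */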
		/* Clear lower 23 bits of context. */
		uasm_i_dins(p, ptr, 0, 0, 23);

		/* 1 0 1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
#elif defined(CONFIG_SMP)
		UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
		uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
		UASM_i_LA_mostly(p, tmp, pgdc);
		uasm_i_daddu(p, ptr, ptr, tmp);
		uasm_i_dmfc0(p, tmp, C0_BADVADDR);
		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
		UASM_i_LA_mostly(p, ptr, pgdc);
		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif
	}

	uasm_l_vmalloc_done(l, *p);

	/* get pgd offset in bytes */
	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PUD_FOLDED
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
	uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
#endif
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}
EXPORT_SYMBOL_GPL(build_get_pmde64);

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr,
			enum vmalloc64_mode mode)
{
	long swpd = (long)swapper_pg_dir;
	int single_insn_swpd;
	int did_vmalloc_branch = 0;

	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

	uasm_l_vmalloc(l, *p);

	if (mode != not_refill && check_for_high_segbits) {
		if (single_insn_swpd) {
			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
			did_vmalloc_branch = 1;
			/* fall through */
		} else {
			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
		}
	}
	if (!did_vmalloc_branch) {
		if (single_insn_swpd) {
			uasm_il_b(p, r, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
		} else {
			UASM_i_LA_mostly(p, ptr, swpd);
			uasm_il_b(p, r, label_vmalloc_done);
			if (uasm_in_compat_space_p(swpd))
				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
			else
				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
		}
	}
	if (mode != not_refill && check_for_high_segbits) {
		uasm_l_large_segbits_fault(l, *p);

		if (mode == refill_scratch && scratch_reg >= 0)
			uasm_i_ehb(p);

		/*
		 * We get here if we are an xsseg address, or if we are
		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
		 *
		 * Ignoring xsseg (assume disabled, so it would generate
		 * address errors), the only remaining possibility
		 * is the upper xuseg addresses.  On processors with
		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
		 * addresses would have taken an address error. We try
		 * to mimic that here by taking a load/istream page
		 * fault.
		 */
944 */ 945 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 946 uasm_i_sync(p, 0); 947 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); 948 uasm_i_jr(p, ptr); 949 950 if (mode == refill_scratch) { 951 if (scratch_reg >= 0) 952 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 953 else 954 UASM_i_LW(p, 1, scratchpad_offset(0), 0); 955 } else { 956 uasm_i_nop(p); 957 } 958 } 959 } 960 961 #else /* !CONFIG_64BIT */ 962 963 /* 964 * TMP and PTR are scratch. 965 * TMP will be clobbered, PTR will hold the pgd entry. 966 */ 967 void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) 968 { 969 if (pgd_reg != -1) { 970 /* pgd is in pgd_reg */ 971 uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg); 972 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 973 } else { 974 long pgdc = (long)pgd_current; 975 976 /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ 977 #ifdef CONFIG_SMP 978 uasm_i_mfc0(p, ptr, SMP_CPUID_REG); 979 UASM_i_LA_mostly(p, tmp, pgdc); 980 uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT); 981 uasm_i_addu(p, ptr, tmp, ptr); 982 #else 983 UASM_i_LA_mostly(p, ptr, pgdc); 984 #endif 985 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 986 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 987 } 988 uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ 989 uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); 990 uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ 991 } 992 EXPORT_SYMBOL_GPL(build_get_pgde32); 993 994 #endif /* !CONFIG_64BIT */ 995 996 static void build_adjust_context(u32 **p, unsigned int ctx) 997 { 998 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; 999 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); 1000 1001 switch (current_cpu_type()) { 1002 case CPU_VR41XX: 1003 case CPU_VR4111: 1004 case CPU_VR4121: 1005 case CPU_VR4122: 1006 case CPU_VR4131: 1007 case CPU_VR4181: 1008 case CPU_VR4181A: 1009 case CPU_VR4133: 1010 shift += 2; 1011 break; 1012 1013 default: 1014 break; 1015 } 1016 1017 if (shift) 1018 UASM_i_SRL(p, ctx, ctx, shift); 1019 uasm_i_andi(p, ctx, ctx, mask); 1020 } 1021 1022 void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) 1023 { 1024 /* 1025 * Bug workaround for the Nevada. It seems as if under certain 1026 * circumstances the move from cp0_context might produce a 1027 * bogus result when the mfc0 instruction and its consumer are 1028 * in a different cacheline or a load instruction, probably any 1029 * memory reference, is between them. 
1030 */ 1031 switch (current_cpu_type()) { 1032 case CPU_NEVADA: 1033 UASM_i_LW(p, ptr, 0, ptr); 1034 GET_CONTEXT(p, tmp); /* get context reg */ 1035 break; 1036 1037 default: 1038 GET_CONTEXT(p, tmp); /* get context reg */ 1039 UASM_i_LW(p, ptr, 0, ptr); 1040 break; 1041 } 1042 1043 build_adjust_context(p, tmp); 1044 UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ 1045 } 1046 EXPORT_SYMBOL_GPL(build_get_ptep); 1047 1048 void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) 1049 { 1050 int pte_off_even = 0; 1051 int pte_off_odd = sizeof(pte_t); 1052 1053 #if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT) 1054 /* The low 32 bits of EntryLo is stored in pte_high */ 1055 pte_off_even += offsetof(pte_t, pte_high); 1056 pte_off_odd += offsetof(pte_t, pte_high); 1057 #endif 1058 1059 if (IS_ENABLED(CONFIG_XPA)) { 1060 uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */ 1061 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1062 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); 1063 1064 if (cpu_has_xpa && !mips_xpa_disabled) { 1065 uasm_i_lw(p, tmp, 0, ptep); 1066 uasm_i_ext(p, tmp, tmp, 0, 24); 1067 uasm_i_mthc0(p, tmp, C0_ENTRYLO0); 1068 } 1069 1070 uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */ 1071 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1072 UASM_i_MTC0(p, tmp, C0_ENTRYLO1); 1073 1074 if (cpu_has_xpa && !mips_xpa_disabled) { 1075 uasm_i_lw(p, tmp, sizeof(pte_t), ptep); 1076 uasm_i_ext(p, tmp, tmp, 0, 24); 1077 uasm_i_mthc0(p, tmp, C0_ENTRYLO1); 1078 } 1079 return; 1080 } 1081 1082 UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */ 1083 UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */ 1084 if (r45k_bvahwbug()) 1085 build_tlb_probe_entry(p); 1086 build_convert_pte_to_entrylo(p, tmp); 1087 if (r4k_250MHZhwbug()) 1088 UASM_i_MTC0(p, 0, C0_ENTRYLO0); 1089 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1090 build_convert_pte_to_entrylo(p, ptep); 1091 if (r45k_bvahwbug()) 1092 uasm_i_mfc0(p, tmp, C0_INDEX); 1093 if (r4k_250MHZhwbug()) 1094 UASM_i_MTC0(p, 0, C0_ENTRYLO1); 1095 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1096 } 1097 EXPORT_SYMBOL_GPL(build_update_entries); 1098 1099 struct mips_huge_tlb_info { 1100 int huge_pte; 1101 int restore_scratch; 1102 bool need_reload_pte; 1103 }; 1104 1105 static struct mips_huge_tlb_info 1106 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, 1107 struct uasm_reloc **r, unsigned int tmp, 1108 unsigned int ptr, int c0_scratch_reg) 1109 { 1110 struct mips_huge_tlb_info rv; 1111 unsigned int even, odd; 1112 int vmalloc_branch_delay_filled = 0; 1113 const int scratch = 1; /* Our extra working register */ 1114 1115 rv.huge_pte = scratch; 1116 rv.restore_scratch = 0; 1117 rv.need_reload_pte = false; 1118 1119 if (check_for_high_segbits) { 1120 UASM_i_MFC0(p, tmp, C0_BADVADDR); 1121 1122 if (pgd_reg != -1) 1123 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); 1124 else 1125 UASM_i_MFC0(p, ptr, C0_CONTEXT); 1126 1127 if (c0_scratch_reg >= 0) 1128 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); 1129 else 1130 UASM_i_SW(p, scratch, scratchpad_offset(0), 0); 1131 1132 uasm_i_dsrl_safe(p, scratch, tmp, 1133 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); 1134 uasm_il_bnez(p, r, scratch, label_vmalloc); 1135 1136 if (pgd_reg == -1) { 1137 vmalloc_branch_delay_filled = 1; 1138 /* Clear lower 23 bits of context. 
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);
		}
	} else {
		if (pgd_reg != -1)
			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
		else
			UASM_i_MFC0(p, ptr, C0_CONTEXT);

		UASM_i_MFC0(p, tmp, C0_BADVADDR);

		if (c0_scratch_reg >= 0)
			UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
		else
			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

		if (pgd_reg == -1)
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);

		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}

	if (pgd_reg == -1) {
		vmalloc_branch_delay_filled = 1;
		/* 1 0 1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
	}

#ifdef __PAGETABLE_PMD_FOLDED
#define LOC_PTEP scratch
#else
#define LOC_PTEP ptr
#endif

	if (!vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

	uasm_l_vmalloc_done(l, *p);

	/*
	 *			tmp		ptr
	 * fall-through case =	badvaddr	*pgd_current
	 * vmalloc case      =	badvaddr	swapper_pg_dir
	 */

	if (vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

#ifdef __PAGETABLE_PMD_FOLDED
	GET_CONTEXT(p, tmp); /* get context reg */
#endif
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);

	if (use_lwx_insns()) {
		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
	}

#ifndef __PAGETABLE_PUD_FOLDED
	/* get pud offset in bytes */
	uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);

	if (use_lwx_insns()) {
		UASM_i_LWX(p, ptr, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
		UASM_i_LW(p, ptr, 0, ptr);
	}
	/* ptr contains a pointer to PMD entry */
	/* tmp contains the address */
#endif

#ifndef __PAGETABLE_PMD_FOLDED
	/* get pmd offset in bytes */
	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
	GET_CONTEXT(p, tmp); /* get context reg */

	if (use_lwx_insns()) {
		UASM_i_LWX(p, scratch, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
		UASM_i_LW(p, scratch, 0, ptr);
	}
#endif
	/* Adjust the context during the load latency. */
	build_adjust_context(p, tmp);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
	/*
	 * In the LWX case we don't want to do the load in the
	 * delay slot.  It cannot issue in the same cycle and may be
	 * speculative and unneeded.
	 */
1239 */ 1240 if (use_lwx_insns()) 1241 uasm_i_nop(p); 1242 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 1243 1244 1245 /* build_update_entries */ 1246 if (use_lwx_insns()) { 1247 even = ptr; 1248 odd = tmp; 1249 UASM_i_LWX(p, even, scratch, tmp); 1250 UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t)); 1251 UASM_i_LWX(p, odd, scratch, tmp); 1252 } else { 1253 UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */ 1254 even = tmp; 1255 odd = ptr; 1256 UASM_i_LW(p, even, 0, ptr); /* get even pte */ 1257 UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */ 1258 } 1259 if (cpu_has_rixi) { 1260 uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL)); 1261 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ 1262 uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL)); 1263 } else { 1264 uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL)); 1265 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ 1266 uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL)); 1267 } 1268 UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ 1269 1270 if (c0_scratch_reg >= 0) { 1271 uasm_i_ehb(p); 1272 UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg); 1273 build_tlb_write_entry(p, l, r, tlb_random); 1274 uasm_l_leave(l, *p); 1275 rv.restore_scratch = 1; 1276 } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) { 1277 build_tlb_write_entry(p, l, r, tlb_random); 1278 uasm_l_leave(l, *p); 1279 UASM_i_LW(p, scratch, scratchpad_offset(0), 0); 1280 } else { 1281 UASM_i_LW(p, scratch, scratchpad_offset(0), 0); 1282 build_tlb_write_entry(p, l, r, tlb_random); 1283 uasm_l_leave(l, *p); 1284 rv.restore_scratch = 1; 1285 } 1286 1287 uasm_i_eret(p); /* return from trap */ 1288 1289 return rv; 1290 } 1291 1292 /* 1293 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception 1294 * because EXL == 0. If we wrap, we can also use the 32 instruction 1295 * slots before the XTLB refill exception handler which belong to the 1296 * unused TLB refill exception. 
1297 */ 1298 #define MIPS64_REFILL_INSNS 32 1299 1300 static void build_r4000_tlb_refill_handler(void) 1301 { 1302 u32 *p = tlb_handler; 1303 struct uasm_label *l = labels; 1304 struct uasm_reloc *r = relocs; 1305 u32 *f; 1306 unsigned int final_len; 1307 struct mips_huge_tlb_info htlb_info __maybe_unused; 1308 enum vmalloc64_mode vmalloc_mode __maybe_unused; 1309 1310 memset(tlb_handler, 0, sizeof(tlb_handler)); 1311 memset(labels, 0, sizeof(labels)); 1312 memset(relocs, 0, sizeof(relocs)); 1313 memset(final_handler, 0, sizeof(final_handler)); 1314 1315 if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { 1316 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, 1317 scratch_reg); 1318 vmalloc_mode = refill_scratch; 1319 } else { 1320 htlb_info.huge_pte = K0; 1321 htlb_info.restore_scratch = 0; 1322 htlb_info.need_reload_pte = true; 1323 vmalloc_mode = refill_noscratch; 1324 /* 1325 * create the plain linear handler 1326 */ 1327 if (bcm1250_m3_war()) { 1328 unsigned int segbits = 44; 1329 1330 uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1331 uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 1332 uasm_i_xor(&p, K0, K0, K1); 1333 uasm_i_dsrl_safe(&p, K1, K0, 62); 1334 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 1335 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 1336 uasm_i_or(&p, K0, K0, K1); 1337 uasm_il_bnez(&p, &r, K0, label_leave); 1338 /* No need for uasm_i_nop */ 1339 } 1340 1341 #ifdef CONFIG_64BIT 1342 build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ 1343 #else 1344 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ 1345 #endif 1346 1347 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1348 build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); 1349 #endif 1350 1351 build_get_ptep(&p, K0, K1); 1352 build_update_entries(&p, K0, K1); 1353 build_tlb_write_entry(&p, &l, &r, tlb_random); 1354 uasm_l_leave(&l, p); 1355 uasm_i_eret(&p); /* return from trap */ 1356 } 1357 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1358 uasm_l_tlb_huge_update(&l, p); 1359 if (htlb_info.need_reload_pte) 1360 UASM_i_LW(&p, htlb_info.huge_pte, 0, K1); 1361 build_huge_update_entries(&p, htlb_info.huge_pte, K1); 1362 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, 1363 htlb_info.restore_scratch); 1364 #endif 1365 1366 #ifdef CONFIG_64BIT 1367 build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); 1368 #endif 1369 1370 /* 1371 * Overflow check: For the 64bit handler, we need at least one 1372 * free instruction slot for the wrap-around branch. In worst 1373 * case, if the intended insertion point is a delay slot, we 1374 * need three, with the second nop'ed and the third being 1375 * unused. 1376 */ 1377 switch (boot_cpu_type()) { 1378 default: 1379 if (sizeof(long) == 4) { 1380 case CPU_LOONGSON2EF: 1381 /* Loongson2 ebase is different than r4k, we have more space */ 1382 if ((p - tlb_handler) > 64) 1383 panic("TLB refill handler space exceeded"); 1384 /* 1385 * Now fold the handler in the TLB refill handler space. 1386 */ 1387 f = final_handler; 1388 /* Simplest case, just copy the handler. */ 1389 uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1390 final_len = p - tlb_handler; 1391 break; 1392 } else { 1393 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) 1394 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) 1395 && uasm_insn_has_bdelay(relocs, 1396 tlb_handler + MIPS64_REFILL_INSNS - 3))) 1397 panic("TLB refill handler space exceeded"); 1398 /* 1399 * Now fold the handler in the TLB refill handler space. 
1400 */ 1401 f = final_handler + MIPS64_REFILL_INSNS; 1402 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { 1403 /* Just copy the handler. */ 1404 uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1405 final_len = p - tlb_handler; 1406 } else { 1407 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1408 const enum label_id ls = label_tlb_huge_update; 1409 #else 1410 const enum label_id ls = label_vmalloc; 1411 #endif 1412 u32 *split; 1413 int ov = 0; 1414 int i; 1415 1416 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) 1417 ; 1418 BUG_ON(i == ARRAY_SIZE(labels)); 1419 split = labels[i].addr; 1420 1421 /* 1422 * See if we have overflown one way or the other. 1423 */ 1424 if (split > tlb_handler + MIPS64_REFILL_INSNS || 1425 split < p - MIPS64_REFILL_INSNS) 1426 ov = 1; 1427 1428 if (ov) { 1429 /* 1430 * Split two instructions before the end. One 1431 * for the branch and one for the instruction 1432 * in the delay slot. 1433 */ 1434 split = tlb_handler + MIPS64_REFILL_INSNS - 2; 1435 1436 /* 1437 * If the branch would fall in a delay slot, 1438 * we must back up an additional instruction 1439 * so that it is no longer in a delay slot. 1440 */ 1441 if (uasm_insn_has_bdelay(relocs, split - 1)) 1442 split--; 1443 } 1444 /* Copy first part of the handler. */ 1445 uasm_copy_handler(relocs, labels, tlb_handler, split, f); 1446 f += split - tlb_handler; 1447 1448 if (ov) { 1449 /* Insert branch. */ 1450 uasm_l_split(&l, final_handler); 1451 uasm_il_b(&f, &r, label_split); 1452 if (uasm_insn_has_bdelay(relocs, split)) 1453 uasm_i_nop(&f); 1454 else { 1455 uasm_copy_handler(relocs, labels, 1456 split, split + 1, f); 1457 uasm_move_labels(labels, f, f + 1, -1); 1458 f++; 1459 split++; 1460 } 1461 } 1462 1463 /* Copy the rest of the handler. */ 1464 uasm_copy_handler(relocs, labels, split, p, final_handler); 1465 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + 1466 (p - split); 1467 } 1468 } 1469 break; 1470 } 1471 1472 uasm_resolve_relocs(relocs, labels); 1473 pr_debug("Wrote TLB refill handler (%u instructions).\n", 1474 final_len); 1475 1476 memcpy((void *)ebase, final_handler, 0x100); 1477 local_flush_icache_range(ebase, ebase + 0x100); 1478 dump_handler("r4000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x100)); 1479 } 1480 1481 static void setup_pw(void) 1482 { 1483 unsigned int pwctl; 1484 unsigned long pgd_i, pgd_w; 1485 #ifndef __PAGETABLE_PMD_FOLDED 1486 unsigned long pmd_i, pmd_w; 1487 #endif 1488 unsigned long pt_i, pt_w; 1489 unsigned long pte_i, pte_w; 1490 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1491 unsigned long psn; 1492 1493 psn = ilog2(_PAGE_HUGE); /* bit used to indicate huge page */ 1494 #endif 1495 pgd_i = PGDIR_SHIFT; /* 1st level PGD */ 1496 #ifndef __PAGETABLE_PMD_FOLDED 1497 pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER; 1498 1499 pmd_i = PMD_SHIFT; /* 2nd level PMD */ 1500 pmd_w = PMD_SHIFT - PAGE_SHIFT; 1501 #else 1502 pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER; 1503 #endif 1504 1505 pt_i = PAGE_SHIFT; /* 3rd level PTE */ 1506 pt_w = PAGE_SHIFT - 3; 1507 1508 pte_i = ilog2(_PAGE_GLOBAL); 1509 pte_w = 0; 1510 pwctl = 1 << 30; /* Set PWDirExt */ 1511 1512 #ifndef __PAGETABLE_PMD_FOLDED 1513 write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i); 1514 write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w); 1515 #else 1516 write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i); 1517 write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w); 1518 #endif 1519 1520 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1521 pwctl |= (1 << 6 | psn); 1522 #endif 1523 
	write_c0_pwctl(pwctl);
	write_c0_kpgd((long)swapper_pg_dir);
	kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
}

static void build_loongson3_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(tlb_handler, 0, sizeof(tlb_handler));

	if (check_for_high_segbits) {
		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_beqz(&p, &r, K1, label_vmalloc);
		uasm_i_nop(&p);

		uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
		uasm_i_nop(&p);
		uasm_l_vmalloc(&l, p);
	}

	uasm_i_dmfc0(&p, K1, C0_PGD);

	uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
#endif
	uasm_i_ldpte(&p, K1, 0); /* even */
	uasm_i_ldpte(&p, K1, 1); /* odd */
	uasm_i_tlbwr(&p);

	/* restore page mask */
	if (PM_DEFAULT_MASK >> 16) {
		uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
		uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
		uasm_i_mtc0(&p, K0, C0_PAGEMASK);
	} else if (PM_DEFAULT_MASK) {
		uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
		uasm_i_mtc0(&p, K0, C0_PAGEMASK);
	} else {
		uasm_i_mtc0(&p, 0, C0_PAGEMASK);
	}

	uasm_i_eret(&p);

	if (check_for_high_segbits) {
		uasm_l_large_segbits_fault(&l, p);
		UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(&p, K1);
		uasm_i_nop(&p);
	}

	uasm_resolve_relocs(relocs, labels);
	memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
	local_flush_icache_range(ebase + 0x80, ebase + 0x100);
	dump_handler("loongson3_tlb_refill",
		     (u32 *)(ebase + 0x80), (u32 *)(ebase + 0x100));
}

static void build_setup_pgd(void)
{
	const int a0 = 4;
	const int __maybe_unused a1 = 5;
	const int __maybe_unused a2 = 6;
	u32 *p = (u32 *)msk_isa16_mode((ulong)tlbmiss_handler_setup_pgd);
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif

	memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p);
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	pgd_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	if (pgd_reg == -1) {
		struct uasm_label *l = labels;
		struct uasm_reloc *r = relocs;

		/* PGD << 11 in c0_Context */
		/*
		 * If it is a ckseg0 address, convert to a physical
		 * address.  Shifting right by 29 and adding 4 will
		 * result in zero for these addresses.
		 */
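		/*
		 * Worked example of the test below (illustrative only): a
		 * sign-extended ckseg0 pointer such as 0xffffffff80001000
		 * gives 0xfffffffffffffffc (-4) after the arithmetic shift
		 * right by 29, so adding 4 yields 0 and the bnez falls
		 * through to the dinsm that clears the upper bits, leaving
		 * the physical address.  Any other kernel segment yields a
		 * non-zero sum and keeps its address unchanged via the
		 * branch to label_tlbl_goaround1.
		 */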
1611 * 1612 */ 1613 UASM_i_SRA(&p, a1, a0, 29); 1614 UASM_i_ADDIU(&p, a1, a1, 4); 1615 uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1); 1616 uasm_i_nop(&p); 1617 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); 1618 uasm_l_tlbl_goaround1(&l, p); 1619 UASM_i_SLL(&p, a0, a0, 11); 1620 UASM_i_MTC0(&p, a0, C0_CONTEXT); 1621 uasm_i_jr(&p, 31); 1622 uasm_i_ehb(&p); 1623 } else { 1624 /* PGD in c0_KScratch */ 1625 if (cpu_has_ldpte) 1626 UASM_i_MTC0(&p, a0, C0_PWBASE); 1627 else 1628 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); 1629 uasm_i_jr(&p, 31); 1630 uasm_i_ehb(&p); 1631 } 1632 #else 1633 #ifdef CONFIG_SMP 1634 /* Save PGD to pgd_current[smp_processor_id()] */ 1635 UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG); 1636 UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT); 1637 UASM_i_LA_mostly(&p, a2, pgdc); 1638 UASM_i_ADDU(&p, a2, a2, a1); 1639 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); 1640 #else 1641 UASM_i_LA_mostly(&p, a2, pgdc); 1642 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); 1643 #endif /* SMP */ 1644 1645 /* if pgd_reg is allocated, save PGD also to scratch register */ 1646 if (pgd_reg != -1) { 1647 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); 1648 uasm_i_jr(&p, 31); 1649 uasm_i_ehb(&p); 1650 } else { 1651 uasm_i_jr(&p, 31); 1652 uasm_i_nop(&p); 1653 } 1654 #endif 1655 if (p >= (u32 *)tlbmiss_handler_setup_pgd_end) 1656 panic("tlbmiss_handler_setup_pgd space exceeded"); 1657 1658 uasm_resolve_relocs(relocs, labels); 1659 pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", 1660 (unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd)); 1661 1662 dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd, 1663 tlbmiss_handler_setup_pgd_end); 1664 } 1665 1666 static void 1667 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) 1668 { 1669 #ifdef CONFIG_SMP 1670 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 1671 uasm_i_sync(p, 0); 1672 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1673 if (cpu_has_64bits) 1674 uasm_i_lld(p, pte, 0, ptr); 1675 else 1676 # endif 1677 UASM_i_LL(p, pte, 0, ptr); 1678 #else 1679 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1680 if (cpu_has_64bits) 1681 uasm_i_ld(p, pte, 0, ptr); 1682 else 1683 # endif 1684 UASM_i_LW(p, pte, 0, ptr); 1685 #endif 1686 } 1687 1688 static void 1689 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, 1690 unsigned int mode, unsigned int scratch) 1691 { 1692 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); 1693 unsigned int swmode = mode & ~hwmode; 1694 1695 if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) { 1696 uasm_i_lui(p, scratch, swmode >> 16); 1697 uasm_i_or(p, pte, pte, scratch); 1698 BUG_ON(swmode & 0xffff); 1699 } else { 1700 uasm_i_ori(p, pte, pte, mode); 1701 } 1702 1703 #ifdef CONFIG_SMP 1704 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1705 if (cpu_has_64bits) 1706 uasm_i_scd(p, pte, 0, ptr); 1707 else 1708 # endif 1709 UASM_i_SC(p, pte, 0, ptr); 1710 1711 if (r10000_llsc_war()) 1712 uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); 1713 else 1714 uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1715 1716 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1717 if (!cpu_has_64bits) { 1718 /* no uasm_i_nop needed */ 1719 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); 1720 uasm_i_ori(p, pte, pte, hwmode); 1721 BUG_ON(hwmode & ~0xffff); 1722 uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); 1723 uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1724 /* no uasm_i_nop needed */ 1725 uasm_i_lw(p, pte, 0, ptr); 1726 } else 1727 uasm_i_nop(p); 1728 # else 1729 uasm_i_nop(p); 1730 # endif 1731 #else 1732 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1733 if (cpu_has_64bits) 1734 
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		BUG_ON(hwmode & ~0xffff);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void
build_pte_present(u32 **p, struct uasm_reloc **r,
		  int pte, int ptr, int scratch, enum label_id lid)
{
	int t = scratch >= 0 ? scratch : pte;
	int cur = pte;

	if (cpu_has_rixi) {
		if (use_bbit_insns()) {
			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
			uasm_i_nop(p);
		} else {
			if (_PAGE_PRESENT_SHIFT) {
				uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
				cur = t;
			}
			uasm_i_andi(p, t, cur, 1);
			uasm_il_beqz(p, r, t, lid);
			if (pte == t)
				/* You lose the SMP race :-( */
				iPTE_LW(p, pte, ptr);
		}
	} else {
		if (_PAGE_PRESENT_SHIFT) {
			uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
			cur = t;
		}
		uasm_i_andi(p, t, cur,
			(_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
		uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
		uasm_il_bnez(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-( */
			iPTE_LW(p, pte, ptr);
	}
}

/* Make PTE valid, store result in PTR. */
static void
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr, unsigned int scratch)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode, scratch);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, int scratch,
		   enum label_id lid)
{
	int t = scratch >= 0 ? scratch : pte;
	int cur = pte;

	if (_PAGE_PRESENT_SHIFT) {
		uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
		cur = t;
	}
	uasm_i_andi(p, t, cur,
		    (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
	uasm_i_xori(p, t, t,
		    (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
	uasm_il_bnez(p, r, t, lid);
	if (pte == t)
		/* You lose the SMP race :-( */
		iPTE_LW(p, pte, ptr);
	else
		uasm_i_nop(p);
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr, unsigned int scratch)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode, scratch);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, int scratch,
		     enum label_id lid)
{
	if (use_bbit_insns()) {
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
		uasm_i_nop(p);
	} else {
		int t = scratch >= 0 ? scratch : pte;
		uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
		uasm_i_andi(p, t, t, 1);
		uasm_il_beqz(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-( */
			iPTE_LW(p, pte, ptr);
	}
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT


/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

static void
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}

static void build_r3000_tlb_load_handler(void)
{
	u32 *p = (u32 *)handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(p, 0, handle_tlbl_end - (char *)p);
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1, -1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= (u32 *)handle_tlbl_end)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - (u32 *)handle_tlbl));

	dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_end);
}

static void build_r3000_tlb_store_handler(void)
{
	u32 *p = (u32 *)handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;


static void build_r3000_tlb_load_handler(void)
{
	u32 *p = (u32 *)handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(p, 0, handle_tlbl_end - (char *)p);
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1, -1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= (u32 *)handle_tlbl_end)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - (u32 *)handle_tlbl));

	dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_end);
}

static void build_r3000_tlb_store_handler(void)
{
	u32 *p = (u32 *)handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(p, 0, handle_tlbs_end - (char *)p);
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1, -1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= (u32 *)handle_tlbs_end)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - (u32 *)handle_tlbs));

	dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_end);
}

static void build_r3000_tlb_modify_handler(void)
{
	u32 *p = (u32 *)handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(p, 0, handle_tlbm_end - (char *)p);
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1, -1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= (u32 *)handle_tlbm_end)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - (u32 *)handle_tlbm));

	dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_end);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

static bool cpu_has_tlbex_tlbp_race(void)
{
	/*
	 * When a Hardware Table Walker is running it can replace TLB entries
	 * at any time, leading to a race between it & the CPU.
	 */
	if (cpu_has_htw)
		return true;

	/*
	 * If the CPU shares FTLB RAM with its siblings then our entry may be
	 * replaced at any time by a sibling performing a write to the FTLB.
	 */
	if (cpu_has_shared_ftlb_ram)
		return true;

	/* In all other cases there ought to be no race condition to handle */
	return false;
}
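
/*
 * Where cpu_has_tlbex_tlbp_race() reports true, the R4000-style handler
 * head below re-checks c0_index after its tlbp and simply returns from the
 * exception if the probe missed, so the access is retried. The RIXI
 * goaround paths do not handle the race; they WARN instead, since CPUs
 * with such races are expected to provide dedicated RI/XI exception
 * handlers.
 */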

/*
 * R4000 style TLB load/store/modify handlers.
 */
static struct work_registers
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r)
{
	struct work_registers wr = build_get_work_registers(p);

#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
#else
	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
#endif

	UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
	UASM_i_LW(p, wr.r2, 0, wr.r2);
	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
	if (!m4kc_tlbp_war()) {
		build_tlb_probe_entry(p);
		if (cpu_has_tlbex_tlbp_race()) {
			/* The entry may have been replaced; leave if the probe missed. */
			uasm_i_ehb(p);
			uasm_i_mfc0(p, wr.r3, C0_INDEX);
			uasm_il_bltz(p, r, wr.r3, label_leave);
			uasm_i_nop(p);
		}
	}
	return wr;
}

static void
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	build_restore_work_registers(p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}

static void build_r4000_tlb_load_handler(void)
{
	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(p, 0, handle_tlbl_end - (char *)p);
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (cpu_has_rixi && !cpu_has_rixiex) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it. Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround1);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
		}
		uasm_i_nop(&p);

		/*
		 * Warn if something may race with us & replace the TLB entry
		 * before we read it here. Everything with such races should
		 * also have dedicated RiXi exception handlers, so this
		 * shouldn't be hit.
		 */
		WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");

		uasm_i_tlbr(&p);

		switch (current_cpu_type()) {
		default:
			if (cpu_has_mips_r2_exec_hazard) {
				uasm_i_ehb(&p);

		case CPU_CAVIUM_OCTEON:
		case CPU_CAVIUM_OCTEON_PLUS:
		case CPU_CAVIUM_OCTEON2:
				break;
			}
		}
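
		/*
		 * Two notes on the code around here. The case labels in the
		 * switch above intentionally sit inside the if block: the
		 * Octeon variants skip the ehb, everything else emits one
		 * after the tlbr when an execution hazard barrier is needed.
		 * For the branch below, the PTE pointer in wr.r2 addresses
		 * one half of an even/odd PTE pair, so bit
		 * log2(sizeof(pte_t)) of it tells us whether the faulting
		 * page corresponds to EntryLo0 (even) or EntryLo1 (odd) as
		 * reloaded by the tlbr. The EntryLo0 read sits in the branch
		 * delay slot and the taken branch skips the EntryLo1 read.
		 */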
		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
			uasm_i_nop(&p);
			uasm_l_tlbl_goaround1(&l, p);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
			uasm_i_nop(&p);
		}
		uasm_l_tlbl_goaround1(&l, p);
	}
	build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	build_tlb_probe_entry(&p);

	if (cpu_has_rixi && !cpu_has_rixiex) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it. Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
		uasm_i_nop(&p);

		/*
		 * Warn if something may race with us & replace the TLB entry
		 * before we read it here. Everything with such races should
		 * also have dedicated RiXi exception handlers, so this
		 * shouldn't be hit.
		 */
		WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");

		uasm_i_tlbr(&p);

		switch (current_cpu_type()) {
		default:
			if (cpu_has_mips_r2_exec_hazard) {
				uasm_i_ehb(&p);

		case CPU_CAVIUM_OCTEON:
		case CPU_CAVIUM_OCTEON_PLUS:
		case CPU_CAVIUM_OCTEON2:
				break;
			}
		}

		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
		if (PM_DEFAULT_MASK == 0)
			uasm_i_nop(&p);
		/*
		 * We clobbered C0_PAGEMASK, restore it. On the other branch
		 * it is restored in build_huge_tlb_write_entry.
		 */
		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);

		uasm_l_tlbl_goaround2(&l, p);
	}
	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif

	uasm_l_nopage_tlbl(&l, p);
	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
		uasm_i_sync(&p, 0);
	build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
	if ((unsigned long)tlb_do_page_fault_0 & 1) {
		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
		uasm_i_jr(&p, K0);
	} else
#endif
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= (u32 *)handle_tlbl_end)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - (u32 *)handle_tlbl));

	dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_end);
}

static void build_r4000_tlb_store_handler(void)
{
	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs);
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(p, 0, handle_tlbs_end - (char *)p);
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif

	uasm_l_nopage_tlbs(&l, p);
	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
		uasm_i_sync(&p, 0);
	build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
	if ((unsigned long)tlb_do_page_fault_1 & 1) {
		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
		uasm_i_jr(&p, K0);
	} else
#endif
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= (u32 *)handle_tlbs_end)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - (u32 *)handle_tlbs));

	dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_end);
}

static void build_r4000_tlb_modify_handler(void)
{
	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm);
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(p, 0, handle_tlbm_end - (char *)p);
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
#endif

	uasm_l_nopage_tlbm(&l, p);
	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
		uasm_i_sync(&p, 0);
	build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
	if ((unsigned long)tlb_do_page_fault_1 & 1) {
		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
		uasm_i_jr(&p, K0);
	} else
#endif
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= (u32 *)handle_tlbm_end)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - (u32 *)handle_tlbm));

	dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_end);
}

static void flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
				 (unsigned long)handle_tlbl_end);
	local_flush_icache_range((unsigned long)handle_tlbs,
				 (unsigned long)handle_tlbs_end);
	local_flush_icache_range((unsigned long)handle_tlbm,
				 (unsigned long)handle_tlbm_end);
	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
				 (unsigned long)tlbmiss_handler_setup_pgd_end);
}

static void print_htw_config(void)
{
	unsigned long config;
	unsigned int pwctl;
	const int field = 2 * sizeof(unsigned long);

	config = read_c0_pwfield();
	pr_debug("PWField (0x%0*lx): GDI: 0x%02lx UDI: 0x%02lx MDI: 0x%02lx PTI: 0x%02lx PTEI: 0x%02lx\n",
		 field, config,
		 (config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
		 (config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
		 (config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
		 (config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
		 (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);

	config = read_c0_pwsize();
	pr_debug("PWSize (0x%0*lx): PS: 0x%lx GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
		 field, config,
		 (config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
		 (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
		 (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
		 (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
		 (config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
		 (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);

	pwctl = read_c0_pwctl();
	pr_debug("PWCtl (0x%x): PWEn: 0x%x XK: 0x%x XS: 0x%x XU: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
		 pwctl,
		 (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
		 (pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
		 (pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
		 (pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
		 (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
		 (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
		 (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
}
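
/*
 * A worked example of the setup below (values are illustrative and assume
 * a two-level layout with 4 KiB pages and 4-byte PTEs, i.e. PAGE_SHIFT ==
 * 12, PGDIR_SHIFT == 22, PTRS_PER_PGD == PTRS_PER_PTE == 1024): PWField is
 * programmed with GDI = 22 and PTI = 12, PWSize with GDW = PTW = 10, and
 * PTEW = PTE_T_LOG2 - PGD_T_LOG2 = 0. Other page sizes, XPA or a third
 * page-table level change these numbers accordingly.
 */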

static void config_htw_params(void)
{
	unsigned long pwfield, pwsize, ptei;
	unsigned int config;

	/*
	 * We are using 2-level page tables by default, so we only need to
	 * set up GDW and PTW; UDW stays 0 and MDW is programmed only when a
	 * third level is configured. The default value of GDI/UDI/MDI/PTI
	 * is 0xc. It is illegal to write values less than 0xc in these
	 * fields because the entire write will be dropped, so we must
	 * preserve the original reset values and overwrite only what we
	 * really want.
	 */

	pwfield = read_c0_pwfield();
	/* re-initialize the GDI field */
	pwfield &= ~MIPS_PWFIELD_GDI_MASK;
	pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
	/* re-initialize the PTI field including the even/odd bit */
	pwfield &= ~MIPS_PWFIELD_PTI_MASK;
	pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
	if (CONFIG_PGTABLE_LEVELS >= 3) {
		pwfield &= ~MIPS_PWFIELD_MDI_MASK;
		pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
	}
	/* Set the PTEI right shift */
	ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
	pwfield |= ptei;
	write_c0_pwfield(pwfield);
	/* Check whether the PTEI value is supported */
	back_to_back_c0_hazard();
	pwfield = read_c0_pwfield();
	if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
		!= ptei) {
		pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled",
			ptei);
		/*
		 * Drop option to avoid HTW being enabled via another path
		 * (e.g. htw_reset())
		 */
		current_cpu_data.options &= ~MIPS_CPU_HTW;
		return;
	}

	pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
	pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
	if (CONFIG_PGTABLE_LEVELS >= 3)
		pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;

	/* Set pointer size to size of directory pointers */
	if (IS_ENABLED(CONFIG_64BIT))
		pwsize |= MIPS_PWSIZE_PS_MASK;
	/* PTEs may be multiple pointers long (e.g. with XPA) */
	pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
			& MIPS_PWSIZE_PTEW_MASK;

	write_c0_pwsize(pwsize);

	/* Make sure everything is set before we enable the HTW */
	back_to_back_c0_hazard();

	/*
	 * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
	 * the pwctl fields.
	 */
	config = 1 << MIPS_PWCTL_PWEN_SHIFT;
	if (IS_ENABLED(CONFIG_64BIT))
		config |= MIPS_PWCTL_XU_MASK;
	write_c0_pwctl(config);
	pr_info("Hardware Page Table Walker enabled\n");

	print_htw_config();
}

static void config_xpa_params(void)
{
#ifdef CONFIG_XPA
	unsigned int pagegrain;

	if (mips_xpa_disabled) {
		pr_info("Extended Physical Addressing (XPA) disabled\n");
		return;
	}

	pagegrain = read_c0_pagegrain();
	write_c0_pagegrain(pagegrain | PG_ELPA);
	back_to_back_c0_hazard();
	pagegrain = read_c0_pagegrain();

	if (pagegrain & PG_ELPA)
		pr_info("Extended Physical Addressing (XPA) enabled\n");
	else
		panic("Extended Physical Addressing (XPA) disabled");
#endif
}
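
/*
 * Rough illustration of the probe below (numbers assume a 64-bit EntryLo
 * and 4 KiB pages): writing ~0 to EntryLo0 and reading it back leaves only
 * the implemented bits set; the low control bits and RI/XI are then masked
 * off so that only PFN bits remain. The PFN field starts at bit
 * MIPS_ENTRYLO_PFN_SHIFT (6) and its bit 0 corresponds to physical address
 * bit 12, so if the highest writable bit is, say, bit 29, then
 * fls_long(entry) == 30 and the CPU implements at least 30 + 6 = 36
 * physical address bits. Whatever remains above that, minus the RI/XI
 * bits, is fill and may be usable for software bits such as _PAGE_NO_EXEC.
 */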

static void check_pabits(void)
{
	unsigned long entry;
	unsigned pabits, fillbits;

	if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
		/*
		 * We'll only be making use of the fact that we can rotate bits
		 * into the fill if the CPU supports RIXI, so don't bother
		 * probing this for CPUs which don't.
		 */
		return;
	}

	write_c0_entrylo0(~0ul);
	back_to_back_c0_hazard();
	entry = read_c0_entrylo0();

	/* clear all non-PFN bits */
	entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
	entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);

	/* find a lower bound on PABITS, and upper bound on fill bits */
	pabits = fls_long(entry) + 6;
	fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);

	/* minus the RI & XI bits */
	fillbits -= min_t(unsigned, fillbits, 2);

	if (fillbits >= ilog2(_PAGE_NO_EXEC))
		fill_includes_sw_bits = true;

	pr_debug("Entry* registers contain %u fill bits\n", fillbits);
}

void build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU; multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
		panic("Kernels supporting XPA currently require CPUs with RIXI");

	output_pgtable_bits_defines();
	check_pabits();

#ifdef CONFIG_64BIT
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

	if (cpu_has_3kex) {
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		if (!run_once) {
			build_setup_pgd();
			build_r3000_tlb_refill_handler();
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			flush_tlb_handlers();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		return;
	}

	if (cpu_has_ldpte)
		setup_pw();

	if (!run_once) {
		scratch_reg = allocate_kscratch();
		build_setup_pgd();
		build_r4000_tlb_load_handler();
		build_r4000_tlb_store_handler();
		build_r4000_tlb_modify_handler();
		if (cpu_has_ldpte)
			build_loongson3_tlb_refill_handler();
		else
			build_r4000_tlb_refill_handler();
		flush_tlb_handlers();
		run_once++;
	}
	if (cpu_has_xpa)
		config_xpa_params();
	if (cpu_has_htw)
		config_htw_params();
}