1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Synthesize TLB refill handlers at runtime. 7 * 8 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer 9 * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki 10 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) 11 * Copyright (C) 2008, 2009 Cavium Networks, Inc. 12 * Copyright (C) 2011 MIPS Technologies, Inc. 13 * 14 * ... and the days got worse and worse and now you see 15 * I've gone completely out of my mind. 16 * 17 * They're coming to take me a away haha 18 * they're coming to take me a away hoho hihi haha 19 * to the funny farm where code is beautiful all the time ... 20 * 21 * (Condolences to Napoleon XIV) 22 */ 23 24 #include <linux/bug.h> 25 #include <linux/export.h> 26 #include <linux/kernel.h> 27 #include <linux/types.h> 28 #include <linux/smp.h> 29 #include <linux/string.h> 30 #include <linux/cache.h> 31 32 #include <asm/cacheflush.h> 33 #include <asm/cpu-type.h> 34 #include <asm/mmu_context.h> 35 #include <asm/pgtable.h> 36 #include <asm/war.h> 37 #include <asm/uasm.h> 38 #include <asm/setup.h> 39 #include <asm/tlbex.h> 40 41 static int mips_xpa_disabled; 42 43 static int __init xpa_disable(char *s) 44 { 45 mips_xpa_disabled = 1; 46 47 return 1; 48 } 49 50 __setup("noxpa", xpa_disable); 51 52 /* 53 * TLB load/store/modify handlers. 54 * 55 * Only the fastpath gets synthesized at runtime, the slowpath for 56 * do_page_fault remains normal asm. 57 */ 58 extern void tlb_do_page_fault_0(void); 59 extern void tlb_do_page_fault_1(void); 60 61 struct work_registers { 62 int r1; 63 int r2; 64 int r3; 65 }; 66 67 struct tlb_reg_save { 68 unsigned long a; 69 unsigned long b; 70 } ____cacheline_aligned_in_smp; 71 72 static struct tlb_reg_save handler_reg_save[NR_CPUS]; 73 74 static inline int r45k_bvahwbug(void) 75 { 76 /* XXX: We should probe for the presence of this bug, but we don't. */ 77 return 0; 78 } 79 80 static inline int r4k_250MHZhwbug(void) 81 { 82 /* XXX: We should probe for the presence of this bug, but we don't. */ 83 return 0; 84 } 85 86 static inline int __maybe_unused bcm1250_m3_war(void) 87 { 88 return BCM1250_M3_WAR; 89 } 90 91 static inline int __maybe_unused r10000_llsc_war(void) 92 { 93 return R10000_LLSC_WAR; 94 } 95 96 static int use_bbit_insns(void) 97 { 98 switch (current_cpu_type()) { 99 case CPU_CAVIUM_OCTEON: 100 case CPU_CAVIUM_OCTEON_PLUS: 101 case CPU_CAVIUM_OCTEON2: 102 case CPU_CAVIUM_OCTEON3: 103 return 1; 104 default: 105 return 0; 106 } 107 } 108 109 static int use_lwx_insns(void) 110 { 111 switch (current_cpu_type()) { 112 case CPU_CAVIUM_OCTEON2: 113 case CPU_CAVIUM_OCTEON3: 114 return 1; 115 default: 116 return 0; 117 } 118 } 119 #if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \ 120 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 121 static bool scratchpad_available(void) 122 { 123 return true; 124 } 125 static int scratchpad_offset(int i) 126 { 127 /* 128 * CVMSEG starts at address -32768 and extends for 129 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines. 130 */ 131 i += 1; /* Kernel use starts at the top and works down. */ 132 return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768; 133 } 134 #else 135 static bool scratchpad_available(void) 136 { 137 return false; 138 } 139 static int scratchpad_offset(int i) 140 { 141 BUG(); 142 /* Really unreachable, but evidently some GCC want this. */ 143 return 0; 144 } 145 #endif 146 /* 147 * Found by experiment: At least some revisions of the 4kc throw under 148 * some circumstances a machine check exception, triggered by invalid 149 * values in the index register. Delaying the tlbp instruction until 150 * after the next branch, plus adding an additional nop in front of 151 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows 152 * why; it's not an issue caused by the core RTL. 153 * 154 */ 155 static int m4kc_tlbp_war(void) 156 { 157 return current_cpu_type() == CPU_4KC; 158 } 159 160 /* Handle labels (which must be positive integers). */ 161 enum label_id { 162 label_second_part = 1, 163 label_leave, 164 label_vmalloc, 165 label_vmalloc_done, 166 label_tlbw_hazard_0, 167 label_split = label_tlbw_hazard_0 + 8, 168 label_tlbl_goaround1, 169 label_tlbl_goaround2, 170 label_nopage_tlbl, 171 label_nopage_tlbs, 172 label_nopage_tlbm, 173 label_smp_pgtable_change, 174 label_r3000_write_probe_fail, 175 label_large_segbits_fault, 176 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 177 label_tlb_huge_update, 178 #endif 179 }; 180 181 UASM_L_LA(_second_part) 182 UASM_L_LA(_leave) 183 UASM_L_LA(_vmalloc) 184 UASM_L_LA(_vmalloc_done) 185 /* _tlbw_hazard_x is handled differently. */ 186 UASM_L_LA(_split) 187 UASM_L_LA(_tlbl_goaround1) 188 UASM_L_LA(_tlbl_goaround2) 189 UASM_L_LA(_nopage_tlbl) 190 UASM_L_LA(_nopage_tlbs) 191 UASM_L_LA(_nopage_tlbm) 192 UASM_L_LA(_smp_pgtable_change) 193 UASM_L_LA(_r3000_write_probe_fail) 194 UASM_L_LA(_large_segbits_fault) 195 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 196 UASM_L_LA(_tlb_huge_update) 197 #endif 198 199 static int hazard_instance; 200 201 static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance) 202 { 203 switch (instance) { 204 case 0 ... 7: 205 uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance); 206 return; 207 default: 208 BUG(); 209 } 210 } 211 212 static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance) 213 { 214 switch (instance) { 215 case 0 ... 7: 216 uasm_build_label(l, *p, label_tlbw_hazard_0 + instance); 217 break; 218 default: 219 BUG(); 220 } 221 } 222 223 /* 224 * pgtable bits are assigned dynamically depending on processor feature 225 * and statically based on kernel configuration. This spits out the actual 226 * values the kernel is using. Required to make sense from disassembled 227 * TLB exception handlers. 228 */ 229 static void output_pgtable_bits_defines(void) 230 { 231 #define pr_define(fmt, ...) \ 232 pr_debug("#define " fmt, ##__VA_ARGS__) 233 234 pr_debug("#include <asm/asm.h>\n"); 235 pr_debug("#include <asm/regdef.h>\n"); 236 pr_debug("\n"); 237 238 pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT); 239 pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); 240 pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT); 241 pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT); 242 pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT); 243 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 244 pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT); 245 #endif 246 #ifdef _PAGE_NO_EXEC_SHIFT 247 if (cpu_has_rixi) 248 pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); 249 #endif 250 pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); 251 pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT); 252 pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT); 253 pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT); 254 pr_debug("\n"); 255 } 256 257 static inline void dump_handler(const char *symbol, const void *start, const void *end) 258 { 259 unsigned int count = (end - start) / sizeof(u32); 260 const u32 *handler = start; 261 int i; 262 263 pr_debug("LEAF(%s)\n", symbol); 264 265 pr_debug("\t.set push\n"); 266 pr_debug("\t.set noreorder\n"); 267 268 for (i = 0; i < count; i++) 269 pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]); 270 271 pr_debug("\t.set\tpop\n"); 272 273 pr_debug("\tEND(%s)\n", symbol); 274 } 275 276 /* The only general purpose registers allowed in TLB handlers. */ 277 #define K0 26 278 #define K1 27 279 280 /* Some CP0 registers */ 281 #define C0_INDEX 0, 0 282 #define C0_ENTRYLO0 2, 0 283 #define C0_TCBIND 2, 2 284 #define C0_ENTRYLO1 3, 0 285 #define C0_CONTEXT 4, 0 286 #define C0_PAGEMASK 5, 0 287 #define C0_PWBASE 5, 5 288 #define C0_PWFIELD 5, 6 289 #define C0_PWSIZE 5, 7 290 #define C0_PWCTL 6, 6 291 #define C0_BADVADDR 8, 0 292 #define C0_PGD 9, 7 293 #define C0_ENTRYHI 10, 0 294 #define C0_EPC 14, 0 295 #define C0_XCONTEXT 20, 0 296 297 #ifdef CONFIG_64BIT 298 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) 299 #else 300 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT) 301 #endif 302 303 /* The worst case length of the handler is around 18 instructions for 304 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs. 305 * Maximum space available is 32 instructions for R3000 and 64 306 * instructions for R4000. 307 * 308 * We deliberately chose a buffer size of 128, so we won't scribble 309 * over anything important on overflow before we panic. 310 */ 311 static u32 tlb_handler[128]; 312 313 /* simply assume worst case size for labels and relocs */ 314 static struct uasm_label labels[128]; 315 static struct uasm_reloc relocs[128]; 316 317 static int check_for_high_segbits; 318 static bool fill_includes_sw_bits; 319 320 static unsigned int kscratch_used_mask; 321 322 static inline int __maybe_unused c0_kscratch(void) 323 { 324 switch (current_cpu_type()) { 325 case CPU_XLP: 326 case CPU_XLR: 327 return 22; 328 default: 329 return 31; 330 } 331 } 332 333 static int allocate_kscratch(void) 334 { 335 int r; 336 unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask; 337 338 r = ffs(a); 339 340 if (r == 0) 341 return -1; 342 343 r--; /* make it zero based */ 344 345 kscratch_used_mask |= (1 << r); 346 347 return r; 348 } 349 350 static int scratch_reg; 351 int pgd_reg; 352 EXPORT_SYMBOL_GPL(pgd_reg); 353 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; 354 355 static struct work_registers build_get_work_registers(u32 **p) 356 { 357 struct work_registers r; 358 359 if (scratch_reg >= 0) { 360 /* Save in CPU local C0_KScratch? */ 361 UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg); 362 r.r1 = K0; 363 r.r2 = K1; 364 r.r3 = 1; 365 return r; 366 } 367 368 if (num_possible_cpus() > 1) { 369 /* Get smp_processor_id */ 370 UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG); 371 UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT); 372 373 /* handler_reg_save index in K0 */ 374 UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); 375 376 UASM_i_LA(p, K1, (long)&handler_reg_save); 377 UASM_i_ADDU(p, K0, K0, K1); 378 } else { 379 UASM_i_LA(p, K0, (long)&handler_reg_save); 380 } 381 /* K0 now points to save area, save $1 and $2 */ 382 UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); 383 UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0); 384 385 r.r1 = K1; 386 r.r2 = 1; 387 r.r3 = 2; 388 return r; 389 } 390 391 static void build_restore_work_registers(u32 **p) 392 { 393 if (scratch_reg >= 0) { 394 uasm_i_ehb(p); 395 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 396 return; 397 } 398 /* K0 already points to save area, restore $1 and $2 */ 399 UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); 400 UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); 401 } 402 403 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 404 405 /* 406 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current, 407 * we cannot do r3000 under these circumstances. 408 * 409 * The R3000 TLB handler is simple. 410 */ 411 static void build_r3000_tlb_refill_handler(void) 412 { 413 long pgdc = (long)pgd_current; 414 u32 *p; 415 416 memset(tlb_handler, 0, sizeof(tlb_handler)); 417 p = tlb_handler; 418 419 uasm_i_mfc0(&p, K0, C0_BADVADDR); 420 uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ 421 uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); 422 uasm_i_srl(&p, K0, K0, 22); /* load delay */ 423 uasm_i_sll(&p, K0, K0, 2); 424 uasm_i_addu(&p, K1, K1, K0); 425 uasm_i_mfc0(&p, K0, C0_CONTEXT); 426 uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ 427 uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ 428 uasm_i_addu(&p, K1, K1, K0); 429 uasm_i_lw(&p, K0, 0, K1); 430 uasm_i_nop(&p); /* load delay */ 431 uasm_i_mtc0(&p, K0, C0_ENTRYLO0); 432 uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ 433 uasm_i_tlbwr(&p); /* cp0 delay */ 434 uasm_i_jr(&p, K1); 435 uasm_i_rfe(&p); /* branch delay */ 436 437 if (p > tlb_handler + 32) 438 panic("TLB refill handler space exceeded"); 439 440 pr_debug("Wrote TLB refill handler (%u instructions).\n", 441 (unsigned int)(p - tlb_handler)); 442 443 memcpy((void *)ebase, tlb_handler, 0x80); 444 local_flush_icache_range(ebase, ebase + 0x80); 445 dump_handler("r3000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x80)); 446 } 447 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ 448 449 /* 450 * The R4000 TLB handler is much more complicated. We have two 451 * consecutive handler areas with 32 instructions space each. 452 * Since they aren't used at the same time, we can overflow in the 453 * other one.To keep things simple, we first assume linear space, 454 * then we relocate it to the final handler layout as needed. 455 */ 456 static u32 final_handler[64]; 457 458 /* 459 * Hazards 460 * 461 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0: 462 * 2. A timing hazard exists for the TLBP instruction. 463 * 464 * stalling_instruction 465 * TLBP 466 * 467 * The JTLB is being read for the TLBP throughout the stall generated by the 468 * previous instruction. This is not really correct as the stalling instruction 469 * can modify the address used to access the JTLB. The failure symptom is that 470 * the TLBP instruction will use an address created for the stalling instruction 471 * and not the address held in C0_ENHI and thus report the wrong results. 472 * 473 * The software work-around is to not allow the instruction preceding the TLBP 474 * to stall - make it an NOP or some other instruction guaranteed not to stall. 475 * 476 * Errata 2 will not be fixed. This errata is also on the R5000. 477 * 478 * As if we MIPS hackers wouldn't know how to nop pipelines happy ... 479 */ 480 static void __maybe_unused build_tlb_probe_entry(u32 **p) 481 { 482 switch (current_cpu_type()) { 483 /* Found by experiment: R4600 v2.0/R4700 needs this, too. */ 484 case CPU_R4600: 485 case CPU_R4700: 486 case CPU_R5000: 487 case CPU_NEVADA: 488 uasm_i_nop(p); 489 uasm_i_tlbp(p); 490 break; 491 492 default: 493 uasm_i_tlbp(p); 494 break; 495 } 496 } 497 498 void build_tlb_write_entry(u32 **p, struct uasm_label **l, 499 struct uasm_reloc **r, 500 enum tlb_write_entry wmode) 501 { 502 void(*tlbw)(u32 **) = NULL; 503 504 switch (wmode) { 505 case tlb_random: tlbw = uasm_i_tlbwr; break; 506 case tlb_indexed: tlbw = uasm_i_tlbwi; break; 507 } 508 509 if (cpu_has_mips_r2_r6) { 510 if (cpu_has_mips_r2_exec_hazard) 511 uasm_i_ehb(p); 512 tlbw(p); 513 return; 514 } 515 516 switch (current_cpu_type()) { 517 case CPU_R4000PC: 518 case CPU_R4000SC: 519 case CPU_R4000MC: 520 case CPU_R4400PC: 521 case CPU_R4400SC: 522 case CPU_R4400MC: 523 /* 524 * This branch uses up a mtc0 hazard nop slot and saves 525 * two nops after the tlbw instruction. 526 */ 527 uasm_bgezl_hazard(p, r, hazard_instance); 528 tlbw(p); 529 uasm_bgezl_label(l, p, hazard_instance); 530 hazard_instance++; 531 uasm_i_nop(p); 532 break; 533 534 case CPU_R4600: 535 case CPU_R4700: 536 uasm_i_nop(p); 537 tlbw(p); 538 uasm_i_nop(p); 539 break; 540 541 case CPU_R5000: 542 case CPU_NEVADA: 543 uasm_i_nop(p); /* QED specifies 2 nops hazard */ 544 uasm_i_nop(p); /* QED specifies 2 nops hazard */ 545 tlbw(p); 546 break; 547 548 case CPU_5KC: 549 case CPU_TX49XX: 550 case CPU_PR4450: 551 case CPU_XLR: 552 uasm_i_nop(p); 553 tlbw(p); 554 break; 555 556 case CPU_R10000: 557 case CPU_R12000: 558 case CPU_R14000: 559 case CPU_R16000: 560 case CPU_4KC: 561 case CPU_4KEC: 562 case CPU_M14KC: 563 case CPU_M14KEC: 564 case CPU_SB1: 565 case CPU_SB1A: 566 case CPU_4KSC: 567 case CPU_20KC: 568 case CPU_25KF: 569 case CPU_BMIPS32: 570 case CPU_BMIPS3300: 571 case CPU_BMIPS4350: 572 case CPU_BMIPS4380: 573 case CPU_BMIPS5000: 574 case CPU_LOONGSON2EF: 575 case CPU_LOONGSON64: 576 case CPU_R5500: 577 if (m4kc_tlbp_war()) 578 uasm_i_nop(p); 579 /* fall through */ 580 case CPU_ALCHEMY: 581 tlbw(p); 582 break; 583 584 case CPU_RM7000: 585 uasm_i_nop(p); 586 uasm_i_nop(p); 587 uasm_i_nop(p); 588 uasm_i_nop(p); 589 tlbw(p); 590 break; 591 592 case CPU_VR4111: 593 case CPU_VR4121: 594 case CPU_VR4122: 595 case CPU_VR4181: 596 case CPU_VR4181A: 597 uasm_i_nop(p); 598 uasm_i_nop(p); 599 tlbw(p); 600 uasm_i_nop(p); 601 uasm_i_nop(p); 602 break; 603 604 case CPU_VR4131: 605 case CPU_VR4133: 606 uasm_i_nop(p); 607 uasm_i_nop(p); 608 tlbw(p); 609 break; 610 611 case CPU_XBURST: 612 tlbw(p); 613 uasm_i_nop(p); 614 break; 615 616 default: 617 panic("No TLB refill handler yet (CPU type: %d)", 618 current_cpu_type()); 619 break; 620 } 621 } 622 EXPORT_SYMBOL_GPL(build_tlb_write_entry); 623 624 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, 625 unsigned int reg) 626 { 627 if (_PAGE_GLOBAL_SHIFT == 0) { 628 /* pte_t is already in EntryLo format */ 629 return; 630 } 631 632 if (cpu_has_rixi && !!_PAGE_NO_EXEC) { 633 if (fill_includes_sw_bits) { 634 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); 635 } else { 636 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC)); 637 UASM_i_ROTR(p, reg, reg, 638 ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); 639 } 640 } else { 641 #ifdef CONFIG_PHYS_ADDR_T_64BIT 642 uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL)); 643 #else 644 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL)); 645 #endif 646 } 647 } 648 649 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 650 651 static void build_restore_pagemask(u32 **p, struct uasm_reloc **r, 652 unsigned int tmp, enum label_id lid, 653 int restore_scratch) 654 { 655 if (restore_scratch) { 656 /* 657 * Ensure the MFC0 below observes the value written to the 658 * KScratch register by the prior MTC0. 659 */ 660 if (scratch_reg >= 0) 661 uasm_i_ehb(p); 662 663 /* Reset default page size */ 664 if (PM_DEFAULT_MASK >> 16) { 665 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); 666 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); 667 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 668 uasm_il_b(p, r, lid); 669 } else if (PM_DEFAULT_MASK) { 670 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); 671 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 672 uasm_il_b(p, r, lid); 673 } else { 674 uasm_i_mtc0(p, 0, C0_PAGEMASK); 675 uasm_il_b(p, r, lid); 676 } 677 if (scratch_reg >= 0) 678 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 679 else 680 UASM_i_LW(p, 1, scratchpad_offset(0), 0); 681 } else { 682 /* Reset default page size */ 683 if (PM_DEFAULT_MASK >> 16) { 684 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); 685 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); 686 uasm_il_b(p, r, lid); 687 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 688 } else if (PM_DEFAULT_MASK) { 689 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); 690 uasm_il_b(p, r, lid); 691 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 692 } else { 693 uasm_il_b(p, r, lid); 694 uasm_i_mtc0(p, 0, C0_PAGEMASK); 695 } 696 } 697 } 698 699 static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l, 700 struct uasm_reloc **r, 701 unsigned int tmp, 702 enum tlb_write_entry wmode, 703 int restore_scratch) 704 { 705 /* Set huge page tlb entry size */ 706 uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); 707 uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); 708 uasm_i_mtc0(p, tmp, C0_PAGEMASK); 709 710 build_tlb_write_entry(p, l, r, wmode); 711 712 build_restore_pagemask(p, r, tmp, label_leave, restore_scratch); 713 } 714 715 /* 716 * Check if Huge PTE is present, if so then jump to LABEL. 717 */ 718 static void 719 build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, 720 unsigned int pmd, int lid) 721 { 722 UASM_i_LW(p, tmp, 0, pmd); 723 if (use_bbit_insns()) { 724 uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid); 725 } else { 726 uasm_i_andi(p, tmp, tmp, _PAGE_HUGE); 727 uasm_il_bnez(p, r, tmp, lid); 728 } 729 } 730 731 static void build_huge_update_entries(u32 **p, unsigned int pte, 732 unsigned int tmp) 733 { 734 int small_sequence; 735 736 /* 737 * A huge PTE describes an area the size of the 738 * configured huge page size. This is twice the 739 * of the large TLB entry size we intend to use. 740 * A TLB entry half the size of the configured 741 * huge page size is configured into entrylo0 742 * and entrylo1 to cover the contiguous huge PTE 743 * address space. 744 */ 745 small_sequence = (HPAGE_SIZE >> 7) < 0x10000; 746 747 /* We can clobber tmp. It isn't used after this.*/ 748 if (!small_sequence) 749 uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); 750 751 build_convert_pte_to_entrylo(p, pte); 752 UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */ 753 /* convert to entrylo1 */ 754 if (small_sequence) 755 UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7); 756 else 757 UASM_i_ADDU(p, pte, pte, tmp); 758 759 UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */ 760 } 761 762 static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, 763 struct uasm_label **l, 764 unsigned int pte, 765 unsigned int ptr, 766 unsigned int flush) 767 { 768 #ifdef CONFIG_SMP 769 UASM_i_SC(p, pte, 0, ptr); 770 uasm_il_beqz(p, r, pte, label_tlb_huge_update); 771 UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */ 772 #else 773 UASM_i_SW(p, pte, 0, ptr); 774 #endif 775 if (cpu_has_ftlb && flush) { 776 BUG_ON(!cpu_has_tlbinv); 777 778 UASM_i_MFC0(p, ptr, C0_ENTRYHI); 779 uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV); 780 UASM_i_MTC0(p, ptr, C0_ENTRYHI); 781 build_tlb_write_entry(p, l, r, tlb_indexed); 782 783 uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV); 784 UASM_i_MTC0(p, ptr, C0_ENTRYHI); 785 build_huge_update_entries(p, pte, ptr); 786 build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0); 787 788 return; 789 } 790 791 build_huge_update_entries(p, pte, ptr); 792 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); 793 } 794 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 795 796 #ifdef CONFIG_64BIT 797 /* 798 * TMP and PTR are scratch. 799 * TMP will be clobbered, PTR will hold the pmd entry. 800 */ 801 void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 802 unsigned int tmp, unsigned int ptr) 803 { 804 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 805 long pgdc = (long)pgd_current; 806 #endif 807 /* 808 * The vmalloc handling is not in the hotpath. 809 */ 810 uasm_i_dmfc0(p, tmp, C0_BADVADDR); 811 812 if (check_for_high_segbits) { 813 /* 814 * The kernel currently implicitely assumes that the 815 * MIPS SEGBITS parameter for the processor is 816 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never 817 * allocate virtual addresses outside the maximum 818 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But 819 * that doesn't prevent user code from accessing the 820 * higher xuseg addresses. Here, we make sure that 821 * everything but the lower xuseg addresses goes down 822 * the module_alloc/vmalloc path. 823 */ 824 uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); 825 uasm_il_bnez(p, r, ptr, label_vmalloc); 826 } else { 827 uasm_il_bltz(p, r, tmp, label_vmalloc); 828 } 829 /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */ 830 831 if (pgd_reg != -1) { 832 /* pgd is in pgd_reg */ 833 if (cpu_has_ldpte) 834 UASM_i_MFC0(p, ptr, C0_PWBASE); 835 else 836 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); 837 } else { 838 #if defined(CONFIG_MIPS_PGD_C0_CONTEXT) 839 /* 840 * &pgd << 11 stored in CONTEXT [23..63]. 841 */ 842 UASM_i_MFC0(p, ptr, C0_CONTEXT); 843 844 /* Clear lower 23 bits of context. */ 845 uasm_i_dins(p, ptr, 0, 0, 23); 846 847 /* 1 0 1 0 1 << 6 xkphys cached */ 848 uasm_i_ori(p, ptr, ptr, 0x540); 849 uasm_i_drotr(p, ptr, ptr, 11); 850 #elif defined(CONFIG_SMP) 851 UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG); 852 uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT); 853 UASM_i_LA_mostly(p, tmp, pgdc); 854 uasm_i_daddu(p, ptr, ptr, tmp); 855 uasm_i_dmfc0(p, tmp, C0_BADVADDR); 856 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); 857 #else 858 UASM_i_LA_mostly(p, ptr, pgdc); 859 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); 860 #endif 861 } 862 863 uasm_l_vmalloc_done(l, *p); 864 865 /* get pgd offset in bytes */ 866 uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3); 867 868 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); 869 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */ 870 #ifndef __PAGETABLE_PUD_FOLDED 871 uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 872 uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */ 873 uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */ 874 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3); 875 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */ 876 #endif 877 #ifndef __PAGETABLE_PMD_FOLDED 878 uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 879 uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */ 880 uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */ 881 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3); 882 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ 883 #endif 884 } 885 EXPORT_SYMBOL_GPL(build_get_pmde64); 886 887 /* 888 * BVADDR is the faulting address, PTR is scratch. 889 * PTR will hold the pgd for vmalloc. 890 */ 891 static void 892 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 893 unsigned int bvaddr, unsigned int ptr, 894 enum vmalloc64_mode mode) 895 { 896 long swpd = (long)swapper_pg_dir; 897 int single_insn_swpd; 898 int did_vmalloc_branch = 0; 899 900 single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd); 901 902 uasm_l_vmalloc(l, *p); 903 904 if (mode != not_refill && check_for_high_segbits) { 905 if (single_insn_swpd) { 906 uasm_il_bltz(p, r, bvaddr, label_vmalloc_done); 907 uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); 908 did_vmalloc_branch = 1; 909 /* fall through */ 910 } else { 911 uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault); 912 } 913 } 914 if (!did_vmalloc_branch) { 915 if (single_insn_swpd) { 916 uasm_il_b(p, r, label_vmalloc_done); 917 uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); 918 } else { 919 UASM_i_LA_mostly(p, ptr, swpd); 920 uasm_il_b(p, r, label_vmalloc_done); 921 if (uasm_in_compat_space_p(swpd)) 922 uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd)); 923 else 924 uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd)); 925 } 926 } 927 if (mode != not_refill && check_for_high_segbits) { 928 uasm_l_large_segbits_fault(l, *p); 929 930 if (mode == refill_scratch && scratch_reg >= 0) 931 uasm_i_ehb(p); 932 933 /* 934 * We get here if we are an xsseg address, or if we are 935 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary. 936 * 937 * Ignoring xsseg (assume disabled so would generate 938 * (address errors?), the only remaining possibility 939 * is the upper xuseg addresses. On processors with 940 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these 941 * addresses would have taken an address error. We try 942 * to mimic that here by taking a load/istream page 943 * fault. 944 */ 945 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 946 uasm_i_sync(p, 0); 947 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); 948 uasm_i_jr(p, ptr); 949 950 if (mode == refill_scratch) { 951 if (scratch_reg >= 0) 952 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 953 else 954 UASM_i_LW(p, 1, scratchpad_offset(0), 0); 955 } else { 956 uasm_i_nop(p); 957 } 958 } 959 } 960 961 #else /* !CONFIG_64BIT */ 962 963 /* 964 * TMP and PTR are scratch. 965 * TMP will be clobbered, PTR will hold the pgd entry. 966 */ 967 void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) 968 { 969 if (pgd_reg != -1) { 970 /* pgd is in pgd_reg */ 971 uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg); 972 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 973 } else { 974 long pgdc = (long)pgd_current; 975 976 /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ 977 #ifdef CONFIG_SMP 978 uasm_i_mfc0(p, ptr, SMP_CPUID_REG); 979 UASM_i_LA_mostly(p, tmp, pgdc); 980 uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT); 981 uasm_i_addu(p, ptr, tmp, ptr); 982 #else 983 UASM_i_LA_mostly(p, ptr, pgdc); 984 #endif 985 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 986 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 987 } 988 uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ 989 uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); 990 uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ 991 } 992 EXPORT_SYMBOL_GPL(build_get_pgde32); 993 994 #endif /* !CONFIG_64BIT */ 995 996 static void build_adjust_context(u32 **p, unsigned int ctx) 997 { 998 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; 999 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); 1000 1001 switch (current_cpu_type()) { 1002 case CPU_VR41XX: 1003 case CPU_VR4111: 1004 case CPU_VR4121: 1005 case CPU_VR4122: 1006 case CPU_VR4131: 1007 case CPU_VR4181: 1008 case CPU_VR4181A: 1009 case CPU_VR4133: 1010 shift += 2; 1011 break; 1012 1013 default: 1014 break; 1015 } 1016 1017 if (shift) 1018 UASM_i_SRL(p, ctx, ctx, shift); 1019 uasm_i_andi(p, ctx, ctx, mask); 1020 } 1021 1022 void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) 1023 { 1024 /* 1025 * Bug workaround for the Nevada. It seems as if under certain 1026 * circumstances the move from cp0_context might produce a 1027 * bogus result when the mfc0 instruction and its consumer are 1028 * in a different cacheline or a load instruction, probably any 1029 * memory reference, is between them. 1030 */ 1031 switch (current_cpu_type()) { 1032 case CPU_NEVADA: 1033 UASM_i_LW(p, ptr, 0, ptr); 1034 GET_CONTEXT(p, tmp); /* get context reg */ 1035 break; 1036 1037 default: 1038 GET_CONTEXT(p, tmp); /* get context reg */ 1039 UASM_i_LW(p, ptr, 0, ptr); 1040 break; 1041 } 1042 1043 build_adjust_context(p, tmp); 1044 UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ 1045 } 1046 EXPORT_SYMBOL_GPL(build_get_ptep); 1047 1048 void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) 1049 { 1050 int pte_off_even = 0; 1051 int pte_off_odd = sizeof(pte_t); 1052 1053 #if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT) 1054 /* The low 32 bits of EntryLo is stored in pte_high */ 1055 pte_off_even += offsetof(pte_t, pte_high); 1056 pte_off_odd += offsetof(pte_t, pte_high); 1057 #endif 1058 1059 if (IS_ENABLED(CONFIG_XPA)) { 1060 uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */ 1061 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1062 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); 1063 1064 if (cpu_has_xpa && !mips_xpa_disabled) { 1065 uasm_i_lw(p, tmp, 0, ptep); 1066 uasm_i_ext(p, tmp, tmp, 0, 24); 1067 uasm_i_mthc0(p, tmp, C0_ENTRYLO0); 1068 } 1069 1070 uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */ 1071 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1072 UASM_i_MTC0(p, tmp, C0_ENTRYLO1); 1073 1074 if (cpu_has_xpa && !mips_xpa_disabled) { 1075 uasm_i_lw(p, tmp, sizeof(pte_t), ptep); 1076 uasm_i_ext(p, tmp, tmp, 0, 24); 1077 uasm_i_mthc0(p, tmp, C0_ENTRYLO1); 1078 } 1079 return; 1080 } 1081 1082 UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */ 1083 UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */ 1084 if (r45k_bvahwbug()) 1085 build_tlb_probe_entry(p); 1086 build_convert_pte_to_entrylo(p, tmp); 1087 if (r4k_250MHZhwbug()) 1088 UASM_i_MTC0(p, 0, C0_ENTRYLO0); 1089 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1090 build_convert_pte_to_entrylo(p, ptep); 1091 if (r45k_bvahwbug()) 1092 uasm_i_mfc0(p, tmp, C0_INDEX); 1093 if (r4k_250MHZhwbug()) 1094 UASM_i_MTC0(p, 0, C0_ENTRYLO1); 1095 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1096 } 1097 EXPORT_SYMBOL_GPL(build_update_entries); 1098 1099 struct mips_huge_tlb_info { 1100 int huge_pte; 1101 int restore_scratch; 1102 bool need_reload_pte; 1103 }; 1104 1105 static struct mips_huge_tlb_info 1106 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, 1107 struct uasm_reloc **r, unsigned int tmp, 1108 unsigned int ptr, int c0_scratch_reg) 1109 { 1110 struct mips_huge_tlb_info rv; 1111 unsigned int even, odd; 1112 int vmalloc_branch_delay_filled = 0; 1113 const int scratch = 1; /* Our extra working register */ 1114 1115 rv.huge_pte = scratch; 1116 rv.restore_scratch = 0; 1117 rv.need_reload_pte = false; 1118 1119 if (check_for_high_segbits) { 1120 UASM_i_MFC0(p, tmp, C0_BADVADDR); 1121 1122 if (pgd_reg != -1) 1123 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); 1124 else 1125 UASM_i_MFC0(p, ptr, C0_CONTEXT); 1126 1127 if (c0_scratch_reg >= 0) 1128 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); 1129 else 1130 UASM_i_SW(p, scratch, scratchpad_offset(0), 0); 1131 1132 uasm_i_dsrl_safe(p, scratch, tmp, 1133 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); 1134 uasm_il_bnez(p, r, scratch, label_vmalloc); 1135 1136 if (pgd_reg == -1) { 1137 vmalloc_branch_delay_filled = 1; 1138 /* Clear lower 23 bits of context. */ 1139 uasm_i_dins(p, ptr, 0, 0, 23); 1140 } 1141 } else { 1142 if (pgd_reg != -1) 1143 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); 1144 else 1145 UASM_i_MFC0(p, ptr, C0_CONTEXT); 1146 1147 UASM_i_MFC0(p, tmp, C0_BADVADDR); 1148 1149 if (c0_scratch_reg >= 0) 1150 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); 1151 else 1152 UASM_i_SW(p, scratch, scratchpad_offset(0), 0); 1153 1154 if (pgd_reg == -1) 1155 /* Clear lower 23 bits of context. */ 1156 uasm_i_dins(p, ptr, 0, 0, 23); 1157 1158 uasm_il_bltz(p, r, tmp, label_vmalloc); 1159 } 1160 1161 if (pgd_reg == -1) { 1162 vmalloc_branch_delay_filled = 1; 1163 /* 1 0 1 0 1 << 6 xkphys cached */ 1164 uasm_i_ori(p, ptr, ptr, 0x540); 1165 uasm_i_drotr(p, ptr, ptr, 11); 1166 } 1167 1168 #ifdef __PAGETABLE_PMD_FOLDED 1169 #define LOC_PTEP scratch 1170 #else 1171 #define LOC_PTEP ptr 1172 #endif 1173 1174 if (!vmalloc_branch_delay_filled) 1175 /* get pgd offset in bytes */ 1176 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); 1177 1178 uasm_l_vmalloc_done(l, *p); 1179 1180 /* 1181 * tmp ptr 1182 * fall-through case = badvaddr *pgd_current 1183 * vmalloc case = badvaddr swapper_pg_dir 1184 */ 1185 1186 if (vmalloc_branch_delay_filled) 1187 /* get pgd offset in bytes */ 1188 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); 1189 1190 #ifdef __PAGETABLE_PMD_FOLDED 1191 GET_CONTEXT(p, tmp); /* get context reg */ 1192 #endif 1193 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3); 1194 1195 if (use_lwx_insns()) { 1196 UASM_i_LWX(p, LOC_PTEP, scratch, ptr); 1197 } else { 1198 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */ 1199 uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */ 1200 } 1201 1202 #ifndef __PAGETABLE_PUD_FOLDED 1203 /* get pud offset in bytes */ 1204 uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3); 1205 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3); 1206 1207 if (use_lwx_insns()) { 1208 UASM_i_LWX(p, ptr, scratch, ptr); 1209 } else { 1210 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */ 1211 UASM_i_LW(p, ptr, 0, ptr); 1212 } 1213 /* ptr contains a pointer to PMD entry */ 1214 /* tmp contains the address */ 1215 #endif 1216 1217 #ifndef __PAGETABLE_PMD_FOLDED 1218 /* get pmd offset in bytes */ 1219 uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3); 1220 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3); 1221 GET_CONTEXT(p, tmp); /* get context reg */ 1222 1223 if (use_lwx_insns()) { 1224 UASM_i_LWX(p, scratch, scratch, ptr); 1225 } else { 1226 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */ 1227 UASM_i_LW(p, scratch, 0, ptr); 1228 } 1229 #endif 1230 /* Adjust the context during the load latency. */ 1231 build_adjust_context(p, tmp); 1232 1233 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1234 uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update); 1235 /* 1236 * The in the LWX case we don't want to do the load in the 1237 * delay slot. It cannot issue in the same cycle and may be 1238 * speculative and unneeded. 1239 */ 1240 if (use_lwx_insns()) 1241 uasm_i_nop(p); 1242 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 1243 1244 1245 /* build_update_entries */ 1246 if (use_lwx_insns()) { 1247 even = ptr; 1248 odd = tmp; 1249 UASM_i_LWX(p, even, scratch, tmp); 1250 UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t)); 1251 UASM_i_LWX(p, odd, scratch, tmp); 1252 } else { 1253 UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */ 1254 even = tmp; 1255 odd = ptr; 1256 UASM_i_LW(p, even, 0, ptr); /* get even pte */ 1257 UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */ 1258 } 1259 if (cpu_has_rixi) { 1260 uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL)); 1261 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ 1262 uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL)); 1263 } else { 1264 uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL)); 1265 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ 1266 uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL)); 1267 } 1268 UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ 1269 1270 if (c0_scratch_reg >= 0) { 1271 uasm_i_ehb(p); 1272 UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg); 1273 build_tlb_write_entry(p, l, r, tlb_random); 1274 uasm_l_leave(l, *p); 1275 rv.restore_scratch = 1; 1276 } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) { 1277 build_tlb_write_entry(p, l, r, tlb_random); 1278 uasm_l_leave(l, *p); 1279 UASM_i_LW(p, scratch, scratchpad_offset(0), 0); 1280 } else { 1281 UASM_i_LW(p, scratch, scratchpad_offset(0), 0); 1282 build_tlb_write_entry(p, l, r, tlb_random); 1283 uasm_l_leave(l, *p); 1284 rv.restore_scratch = 1; 1285 } 1286 1287 uasm_i_eret(p); /* return from trap */ 1288 1289 return rv; 1290 } 1291 1292 /* 1293 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception 1294 * because EXL == 0. If we wrap, we can also use the 32 instruction 1295 * slots before the XTLB refill exception handler which belong to the 1296 * unused TLB refill exception. 1297 */ 1298 #define MIPS64_REFILL_INSNS 32 1299 1300 static void build_r4000_tlb_refill_handler(void) 1301 { 1302 u32 *p = tlb_handler; 1303 struct uasm_label *l = labels; 1304 struct uasm_reloc *r = relocs; 1305 u32 *f; 1306 unsigned int final_len; 1307 struct mips_huge_tlb_info htlb_info __maybe_unused; 1308 enum vmalloc64_mode vmalloc_mode __maybe_unused; 1309 1310 memset(tlb_handler, 0, sizeof(tlb_handler)); 1311 memset(labels, 0, sizeof(labels)); 1312 memset(relocs, 0, sizeof(relocs)); 1313 memset(final_handler, 0, sizeof(final_handler)); 1314 1315 if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { 1316 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, 1317 scratch_reg); 1318 vmalloc_mode = refill_scratch; 1319 } else { 1320 htlb_info.huge_pte = K0; 1321 htlb_info.restore_scratch = 0; 1322 htlb_info.need_reload_pte = true; 1323 vmalloc_mode = refill_noscratch; 1324 /* 1325 * create the plain linear handler 1326 */ 1327 if (bcm1250_m3_war()) { 1328 unsigned int segbits = 44; 1329 1330 uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1331 uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 1332 uasm_i_xor(&p, K0, K0, K1); 1333 uasm_i_dsrl_safe(&p, K1, K0, 62); 1334 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 1335 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 1336 uasm_i_or(&p, K0, K0, K1); 1337 uasm_il_bnez(&p, &r, K0, label_leave); 1338 /* No need for uasm_i_nop */ 1339 } 1340 1341 #ifdef CONFIG_64BIT 1342 build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ 1343 #else 1344 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ 1345 #endif 1346 1347 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1348 build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); 1349 #endif 1350 1351 build_get_ptep(&p, K0, K1); 1352 build_update_entries(&p, K0, K1); 1353 build_tlb_write_entry(&p, &l, &r, tlb_random); 1354 uasm_l_leave(&l, p); 1355 uasm_i_eret(&p); /* return from trap */ 1356 } 1357 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1358 uasm_l_tlb_huge_update(&l, p); 1359 if (htlb_info.need_reload_pte) 1360 UASM_i_LW(&p, htlb_info.huge_pte, 0, K1); 1361 build_huge_update_entries(&p, htlb_info.huge_pte, K1); 1362 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, 1363 htlb_info.restore_scratch); 1364 #endif 1365 1366 #ifdef CONFIG_64BIT 1367 build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); 1368 #endif 1369 1370 /* 1371 * Overflow check: For the 64bit handler, we need at least one 1372 * free instruction slot for the wrap-around branch. In worst 1373 * case, if the intended insertion point is a delay slot, we 1374 * need three, with the second nop'ed and the third being 1375 * unused. 1376 */ 1377 switch (boot_cpu_type()) { 1378 default: 1379 if (sizeof(long) == 4) { 1380 case CPU_LOONGSON2EF: 1381 /* Loongson2 ebase is different than r4k, we have more space */ 1382 if ((p - tlb_handler) > 64) 1383 panic("TLB refill handler space exceeded"); 1384 /* 1385 * Now fold the handler in the TLB refill handler space. 1386 */ 1387 f = final_handler; 1388 /* Simplest case, just copy the handler. */ 1389 uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1390 final_len = p - tlb_handler; 1391 break; 1392 } else { 1393 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) 1394 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) 1395 && uasm_insn_has_bdelay(relocs, 1396 tlb_handler + MIPS64_REFILL_INSNS - 3))) 1397 panic("TLB refill handler space exceeded"); 1398 /* 1399 * Now fold the handler in the TLB refill handler space. 1400 */ 1401 f = final_handler + MIPS64_REFILL_INSNS; 1402 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { 1403 /* Just copy the handler. */ 1404 uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1405 final_len = p - tlb_handler; 1406 } else { 1407 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1408 const enum label_id ls = label_tlb_huge_update; 1409 #else 1410 const enum label_id ls = label_vmalloc; 1411 #endif 1412 u32 *split; 1413 int ov = 0; 1414 int i; 1415 1416 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) 1417 ; 1418 BUG_ON(i == ARRAY_SIZE(labels)); 1419 split = labels[i].addr; 1420 1421 /* 1422 * See if we have overflown one way or the other. 1423 */ 1424 if (split > tlb_handler + MIPS64_REFILL_INSNS || 1425 split < p - MIPS64_REFILL_INSNS) 1426 ov = 1; 1427 1428 if (ov) { 1429 /* 1430 * Split two instructions before the end. One 1431 * for the branch and one for the instruction 1432 * in the delay slot. 1433 */ 1434 split = tlb_handler + MIPS64_REFILL_INSNS - 2; 1435 1436 /* 1437 * If the branch would fall in a delay slot, 1438 * we must back up an additional instruction 1439 * so that it is no longer in a delay slot. 1440 */ 1441 if (uasm_insn_has_bdelay(relocs, split - 1)) 1442 split--; 1443 } 1444 /* Copy first part of the handler. */ 1445 uasm_copy_handler(relocs, labels, tlb_handler, split, f); 1446 f += split - tlb_handler; 1447 1448 if (ov) { 1449 /* Insert branch. */ 1450 uasm_l_split(&l, final_handler); 1451 uasm_il_b(&f, &r, label_split); 1452 if (uasm_insn_has_bdelay(relocs, split)) 1453 uasm_i_nop(&f); 1454 else { 1455 uasm_copy_handler(relocs, labels, 1456 split, split + 1, f); 1457 uasm_move_labels(labels, f, f + 1, -1); 1458 f++; 1459 split++; 1460 } 1461 } 1462 1463 /* Copy the rest of the handler. */ 1464 uasm_copy_handler(relocs, labels, split, p, final_handler); 1465 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + 1466 (p - split); 1467 } 1468 } 1469 break; 1470 } 1471 1472 uasm_resolve_relocs(relocs, labels); 1473 pr_debug("Wrote TLB refill handler (%u instructions).\n", 1474 final_len); 1475 1476 memcpy((void *)ebase, final_handler, 0x100); 1477 local_flush_icache_range(ebase, ebase + 0x100); 1478 dump_handler("r4000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x100)); 1479 } 1480 1481 static void setup_pw(void) 1482 { 1483 unsigned long pgd_i, pgd_w; 1484 #ifndef __PAGETABLE_PMD_FOLDED 1485 unsigned long pmd_i, pmd_w; 1486 #endif 1487 unsigned long pt_i, pt_w; 1488 unsigned long pte_i, pte_w; 1489 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1490 unsigned long psn; 1491 1492 psn = ilog2(_PAGE_HUGE); /* bit used to indicate huge page */ 1493 #endif 1494 pgd_i = PGDIR_SHIFT; /* 1st level PGD */ 1495 #ifndef __PAGETABLE_PMD_FOLDED 1496 pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER; 1497 1498 pmd_i = PMD_SHIFT; /* 2nd level PMD */ 1499 pmd_w = PMD_SHIFT - PAGE_SHIFT; 1500 #else 1501 pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER; 1502 #endif 1503 1504 pt_i = PAGE_SHIFT; /* 3rd level PTE */ 1505 pt_w = PAGE_SHIFT - 3; 1506 1507 pte_i = ilog2(_PAGE_GLOBAL); 1508 pte_w = 0; 1509 1510 #ifndef __PAGETABLE_PMD_FOLDED 1511 write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i); 1512 write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w); 1513 #else 1514 write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i); 1515 write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w); 1516 #endif 1517 1518 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1519 write_c0_pwctl(1 << 6 | psn); 1520 #endif 1521 write_c0_kpgd((long)swapper_pg_dir); 1522 kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */ 1523 } 1524 1525 static void build_loongson3_tlb_refill_handler(void) 1526 { 1527 u32 *p = tlb_handler; 1528 struct uasm_label *l = labels; 1529 struct uasm_reloc *r = relocs; 1530 1531 memset(labels, 0, sizeof(labels)); 1532 memset(relocs, 0, sizeof(relocs)); 1533 memset(tlb_handler, 0, sizeof(tlb_handler)); 1534 1535 if (check_for_high_segbits) { 1536 uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1537 uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); 1538 uasm_il_beqz(&p, &r, K1, label_vmalloc); 1539 uasm_i_nop(&p); 1540 1541 uasm_il_bgez(&p, &r, K0, label_large_segbits_fault); 1542 uasm_i_nop(&p); 1543 uasm_l_vmalloc(&l, p); 1544 } 1545 1546 uasm_i_dmfc0(&p, K1, C0_PGD); 1547 1548 uasm_i_lddir(&p, K0, K1, 3); /* global page dir */ 1549 #ifndef __PAGETABLE_PMD_FOLDED 1550 uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */ 1551 #endif 1552 uasm_i_ldpte(&p, K1, 0); /* even */ 1553 uasm_i_ldpte(&p, K1, 1); /* odd */ 1554 uasm_i_tlbwr(&p); 1555 1556 /* restore page mask */ 1557 if (PM_DEFAULT_MASK >> 16) { 1558 uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16); 1559 uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff); 1560 uasm_i_mtc0(&p, K0, C0_PAGEMASK); 1561 } else if (PM_DEFAULT_MASK) { 1562 uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK); 1563 uasm_i_mtc0(&p, K0, C0_PAGEMASK); 1564 } else { 1565 uasm_i_mtc0(&p, 0, C0_PAGEMASK); 1566 } 1567 1568 uasm_i_eret(&p); 1569 1570 if (check_for_high_segbits) { 1571 uasm_l_large_segbits_fault(&l, p); 1572 UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0); 1573 uasm_i_jr(&p, K1); 1574 uasm_i_nop(&p); 1575 } 1576 1577 uasm_resolve_relocs(relocs, labels); 1578 memcpy((void *)(ebase + 0x80), tlb_handler, 0x80); 1579 local_flush_icache_range(ebase + 0x80, ebase + 0x100); 1580 dump_handler("loongson3_tlb_refill", 1581 (u32 *)(ebase + 0x80), (u32 *)(ebase + 0x100)); 1582 } 1583 1584 static void build_setup_pgd(void) 1585 { 1586 const int a0 = 4; 1587 const int __maybe_unused a1 = 5; 1588 const int __maybe_unused a2 = 6; 1589 u32 *p = (u32 *)msk_isa16_mode((ulong)tlbmiss_handler_setup_pgd); 1590 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 1591 long pgdc = (long)pgd_current; 1592 #endif 1593 1594 memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p); 1595 memset(labels, 0, sizeof(labels)); 1596 memset(relocs, 0, sizeof(relocs)); 1597 pgd_reg = allocate_kscratch(); 1598 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT 1599 if (pgd_reg == -1) { 1600 struct uasm_label *l = labels; 1601 struct uasm_reloc *r = relocs; 1602 1603 /* PGD << 11 in c0_Context */ 1604 /* 1605 * If it is a ckseg0 address, convert to a physical 1606 * address. Shifting right by 29 and adding 4 will 1607 * result in zero for these addresses. 1608 * 1609 */ 1610 UASM_i_SRA(&p, a1, a0, 29); 1611 UASM_i_ADDIU(&p, a1, a1, 4); 1612 uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1); 1613 uasm_i_nop(&p); 1614 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); 1615 uasm_l_tlbl_goaround1(&l, p); 1616 UASM_i_SLL(&p, a0, a0, 11); 1617 UASM_i_MTC0(&p, a0, C0_CONTEXT); 1618 uasm_i_jr(&p, 31); 1619 uasm_i_ehb(&p); 1620 } else { 1621 /* PGD in c0_KScratch */ 1622 if (cpu_has_ldpte) 1623 UASM_i_MTC0(&p, a0, C0_PWBASE); 1624 else 1625 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); 1626 uasm_i_jr(&p, 31); 1627 uasm_i_ehb(&p); 1628 } 1629 #else 1630 #ifdef CONFIG_SMP 1631 /* Save PGD to pgd_current[smp_processor_id()] */ 1632 UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG); 1633 UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT); 1634 UASM_i_LA_mostly(&p, a2, pgdc); 1635 UASM_i_ADDU(&p, a2, a2, a1); 1636 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); 1637 #else 1638 UASM_i_LA_mostly(&p, a2, pgdc); 1639 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); 1640 #endif /* SMP */ 1641 1642 /* if pgd_reg is allocated, save PGD also to scratch register */ 1643 if (pgd_reg != -1) { 1644 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); 1645 uasm_i_jr(&p, 31); 1646 uasm_i_ehb(&p); 1647 } else { 1648 uasm_i_jr(&p, 31); 1649 uasm_i_nop(&p); 1650 } 1651 #endif 1652 if (p >= (u32 *)tlbmiss_handler_setup_pgd_end) 1653 panic("tlbmiss_handler_setup_pgd space exceeded"); 1654 1655 uasm_resolve_relocs(relocs, labels); 1656 pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", 1657 (unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd)); 1658 1659 dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd, 1660 tlbmiss_handler_setup_pgd_end); 1661 } 1662 1663 static void 1664 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) 1665 { 1666 #ifdef CONFIG_SMP 1667 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 1668 uasm_i_sync(p, 0); 1669 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1670 if (cpu_has_64bits) 1671 uasm_i_lld(p, pte, 0, ptr); 1672 else 1673 # endif 1674 UASM_i_LL(p, pte, 0, ptr); 1675 #else 1676 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1677 if (cpu_has_64bits) 1678 uasm_i_ld(p, pte, 0, ptr); 1679 else 1680 # endif 1681 UASM_i_LW(p, pte, 0, ptr); 1682 #endif 1683 } 1684 1685 static void 1686 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, 1687 unsigned int mode, unsigned int scratch) 1688 { 1689 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); 1690 unsigned int swmode = mode & ~hwmode; 1691 1692 if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) { 1693 uasm_i_lui(p, scratch, swmode >> 16); 1694 uasm_i_or(p, pte, pte, scratch); 1695 BUG_ON(swmode & 0xffff); 1696 } else { 1697 uasm_i_ori(p, pte, pte, mode); 1698 } 1699 1700 #ifdef CONFIG_SMP 1701 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1702 if (cpu_has_64bits) 1703 uasm_i_scd(p, pte, 0, ptr); 1704 else 1705 # endif 1706 UASM_i_SC(p, pte, 0, ptr); 1707 1708 if (r10000_llsc_war()) 1709 uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); 1710 else 1711 uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1712 1713 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1714 if (!cpu_has_64bits) { 1715 /* no uasm_i_nop needed */ 1716 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); 1717 uasm_i_ori(p, pte, pte, hwmode); 1718 BUG_ON(hwmode & ~0xffff); 1719 uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); 1720 uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1721 /* no uasm_i_nop needed */ 1722 uasm_i_lw(p, pte, 0, ptr); 1723 } else 1724 uasm_i_nop(p); 1725 # else 1726 uasm_i_nop(p); 1727 # endif 1728 #else 1729 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1730 if (cpu_has_64bits) 1731 uasm_i_sd(p, pte, 0, ptr); 1732 else 1733 # endif 1734 UASM_i_SW(p, pte, 0, ptr); 1735 1736 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1737 if (!cpu_has_64bits) { 1738 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); 1739 uasm_i_ori(p, pte, pte, hwmode); 1740 BUG_ON(hwmode & ~0xffff); 1741 uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr); 1742 uasm_i_lw(p, pte, 0, ptr); 1743 } 1744 # endif 1745 #endif 1746 } 1747 1748 /* 1749 * Check if PTE is present, if not then jump to LABEL. PTR points to 1750 * the page table where this PTE is located, PTE will be re-loaded 1751 * with it's original value. 1752 */ 1753 static void 1754 build_pte_present(u32 **p, struct uasm_reloc **r, 1755 int pte, int ptr, int scratch, enum label_id lid) 1756 { 1757 int t = scratch >= 0 ? scratch : pte; 1758 int cur = pte; 1759 1760 if (cpu_has_rixi) { 1761 if (use_bbit_insns()) { 1762 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); 1763 uasm_i_nop(p); 1764 } else { 1765 if (_PAGE_PRESENT_SHIFT) { 1766 uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT); 1767 cur = t; 1768 } 1769 uasm_i_andi(p, t, cur, 1); 1770 uasm_il_beqz(p, r, t, lid); 1771 if (pte == t) 1772 /* You lose the SMP race :-(*/ 1773 iPTE_LW(p, pte, ptr); 1774 } 1775 } else { 1776 if (_PAGE_PRESENT_SHIFT) { 1777 uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT); 1778 cur = t; 1779 } 1780 uasm_i_andi(p, t, cur, 1781 (_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT); 1782 uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT); 1783 uasm_il_bnez(p, r, t, lid); 1784 if (pte == t) 1785 /* You lose the SMP race :-(*/ 1786 iPTE_LW(p, pte, ptr); 1787 } 1788 } 1789 1790 /* Make PTE valid, store result in PTR. */ 1791 static void 1792 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, 1793 unsigned int ptr, unsigned int scratch) 1794 { 1795 unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED; 1796 1797 iPTE_SW(p, r, pte, ptr, mode, scratch); 1798 } 1799 1800 /* 1801 * Check if PTE can be written to, if not branch to LABEL. Regardless 1802 * restore PTE with value from PTR when done. 1803 */ 1804 static void 1805 build_pte_writable(u32 **p, struct uasm_reloc **r, 1806 unsigned int pte, unsigned int ptr, int scratch, 1807 enum label_id lid) 1808 { 1809 int t = scratch >= 0 ? scratch : pte; 1810 int cur = pte; 1811 1812 if (_PAGE_PRESENT_SHIFT) { 1813 uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT); 1814 cur = t; 1815 } 1816 uasm_i_andi(p, t, cur, 1817 (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT); 1818 uasm_i_xori(p, t, t, 1819 (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT); 1820 uasm_il_bnez(p, r, t, lid); 1821 if (pte == t) 1822 /* You lose the SMP race :-(*/ 1823 iPTE_LW(p, pte, ptr); 1824 else 1825 uasm_i_nop(p); 1826 } 1827 1828 /* Make PTE writable, update software status bits as well, then store 1829 * at PTR. 1830 */ 1831 static void 1832 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, 1833 unsigned int ptr, unsigned int scratch) 1834 { 1835 unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID 1836 | _PAGE_DIRTY); 1837 1838 iPTE_SW(p, r, pte, ptr, mode, scratch); 1839 } 1840 1841 /* 1842 * Check if PTE can be modified, if not branch to LABEL. Regardless 1843 * restore PTE with value from PTR when done. 1844 */ 1845 static void 1846 build_pte_modifiable(u32 **p, struct uasm_reloc **r, 1847 unsigned int pte, unsigned int ptr, int scratch, 1848 enum label_id lid) 1849 { 1850 if (use_bbit_insns()) { 1851 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid); 1852 uasm_i_nop(p); 1853 } else { 1854 int t = scratch >= 0 ? scratch : pte; 1855 uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT); 1856 uasm_i_andi(p, t, t, 1); 1857 uasm_il_beqz(p, r, t, lid); 1858 if (pte == t) 1859 /* You lose the SMP race :-(*/ 1860 iPTE_LW(p, pte, ptr); 1861 } 1862 } 1863 1864 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 1865 1866 1867 /* 1868 * R3000 style TLB load/store/modify handlers. 1869 */ 1870 1871 /* 1872 * This places the pte into ENTRYLO0 and writes it with tlbwi. 1873 * Then it returns. 1874 */ 1875 static void 1876 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) 1877 { 1878 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1879 uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ 1880 uasm_i_tlbwi(p); 1881 uasm_i_jr(p, tmp); 1882 uasm_i_rfe(p); /* branch delay */ 1883 } 1884 1885 /* 1886 * This places the pte into ENTRYLO0 and writes it with tlbwi 1887 * or tlbwr as appropriate. This is because the index register 1888 * may have the probe fail bit set as a result of a trap on a 1889 * kseg2 access, i.e. without refill. Then it returns. 1890 */ 1891 static void 1892 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, 1893 struct uasm_reloc **r, unsigned int pte, 1894 unsigned int tmp) 1895 { 1896 uasm_i_mfc0(p, tmp, C0_INDEX); 1897 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1898 uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ 1899 uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */ 1900 uasm_i_tlbwi(p); /* cp0 delay */ 1901 uasm_i_jr(p, tmp); 1902 uasm_i_rfe(p); /* branch delay */ 1903 uasm_l_r3000_write_probe_fail(l, *p); 1904 uasm_i_tlbwr(p); /* cp0 delay */ 1905 uasm_i_jr(p, tmp); 1906 uasm_i_rfe(p); /* branch delay */ 1907 } 1908 1909 static void 1910 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, 1911 unsigned int ptr) 1912 { 1913 long pgdc = (long)pgd_current; 1914 1915 uasm_i_mfc0(p, pte, C0_BADVADDR); 1916 uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */ 1917 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 1918 uasm_i_srl(p, pte, pte, 22); /* load delay */ 1919 uasm_i_sll(p, pte, pte, 2); 1920 uasm_i_addu(p, ptr, ptr, pte); 1921 uasm_i_mfc0(p, pte, C0_CONTEXT); 1922 uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */ 1923 uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ 1924 uasm_i_addu(p, ptr, ptr, pte); 1925 uasm_i_lw(p, pte, 0, ptr); 1926 uasm_i_tlbp(p); /* load delay */ 1927 } 1928 1929 static void build_r3000_tlb_load_handler(void) 1930 { 1931 u32 *p = (u32 *)handle_tlbl; 1932 struct uasm_label *l = labels; 1933 struct uasm_reloc *r = relocs; 1934 1935 memset(p, 0, handle_tlbl_end - (char *)p); 1936 memset(labels, 0, sizeof(labels)); 1937 memset(relocs, 0, sizeof(relocs)); 1938 1939 build_r3000_tlbchange_handler_head(&p, K0, K1); 1940 build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); 1941 uasm_i_nop(&p); /* load delay */ 1942 build_make_valid(&p, &r, K0, K1, -1); 1943 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1944 1945 uasm_l_nopage_tlbl(&l, p); 1946 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 1947 uasm_i_nop(&p); 1948 1949 if (p >= (u32 *)handle_tlbl_end) 1950 panic("TLB load handler fastpath space exceeded"); 1951 1952 uasm_resolve_relocs(relocs, labels); 1953 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", 1954 (unsigned int)(p - (u32 *)handle_tlbl)); 1955 1956 dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_end); 1957 } 1958 1959 static void build_r3000_tlb_store_handler(void) 1960 { 1961 u32 *p = (u32 *)handle_tlbs; 1962 struct uasm_label *l = labels; 1963 struct uasm_reloc *r = relocs; 1964 1965 memset(p, 0, handle_tlbs_end - (char *)p); 1966 memset(labels, 0, sizeof(labels)); 1967 memset(relocs, 0, sizeof(relocs)); 1968 1969 build_r3000_tlbchange_handler_head(&p, K0, K1); 1970 build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); 1971 uasm_i_nop(&p); /* load delay */ 1972 build_make_write(&p, &r, K0, K1, -1); 1973 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1974 1975 uasm_l_nopage_tlbs(&l, p); 1976 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1977 uasm_i_nop(&p); 1978 1979 if (p >= (u32 *)handle_tlbs_end) 1980 panic("TLB store handler fastpath space exceeded"); 1981 1982 uasm_resolve_relocs(relocs, labels); 1983 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", 1984 (unsigned int)(p - (u32 *)handle_tlbs)); 1985 1986 dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_end); 1987 } 1988 1989 static void build_r3000_tlb_modify_handler(void) 1990 { 1991 u32 *p = (u32 *)handle_tlbm; 1992 struct uasm_label *l = labels; 1993 struct uasm_reloc *r = relocs; 1994 1995 memset(p, 0, handle_tlbm_end - (char *)p); 1996 memset(labels, 0, sizeof(labels)); 1997 memset(relocs, 0, sizeof(relocs)); 1998 1999 build_r3000_tlbchange_handler_head(&p, K0, K1); 2000 build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); 2001 uasm_i_nop(&p); /* load delay */ 2002 build_make_write(&p, &r, K0, K1, -1); 2003 build_r3000_pte_reload_tlbwi(&p, K0, K1); 2004 2005 uasm_l_nopage_tlbm(&l, p); 2006 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 2007 uasm_i_nop(&p); 2008 2009 if (p >= (u32 *)handle_tlbm_end) 2010 panic("TLB modify handler fastpath space exceeded"); 2011 2012 uasm_resolve_relocs(relocs, labels); 2013 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", 2014 (unsigned int)(p - (u32 *)handle_tlbm)); 2015 2016 dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_end); 2017 } 2018 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ 2019 2020 static bool cpu_has_tlbex_tlbp_race(void) 2021 { 2022 /* 2023 * When a Hardware Table Walker is running it can replace TLB entries 2024 * at any time, leading to a race between it & the CPU. 2025 */ 2026 if (cpu_has_htw) 2027 return true; 2028 2029 /* 2030 * If the CPU shares FTLB RAM with its siblings then our entry may be 2031 * replaced at any time by a sibling performing a write to the FTLB. 2032 */ 2033 if (cpu_has_shared_ftlb_ram) 2034 return true; 2035 2036 /* In all other cases there ought to be no race condition to handle */ 2037 return false; 2038 } 2039 2040 /* 2041 * R4000 style TLB load/store/modify handlers. 2042 */ 2043 static struct work_registers 2044 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, 2045 struct uasm_reloc **r) 2046 { 2047 struct work_registers wr = build_get_work_registers(p); 2048 2049 #ifdef CONFIG_64BIT 2050 build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */ 2051 #else 2052 build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */ 2053 #endif 2054 2055 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 2056 /* 2057 * For huge tlb entries, pmd doesn't contain an address but 2058 * instead contains the tlb pte. Check the PAGE_HUGE bit and 2059 * see if we need to jump to huge tlb processing. 2060 */ 2061 build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update); 2062 #endif 2063 2064 UASM_i_MFC0(p, wr.r1, C0_BADVADDR); 2065 UASM_i_LW(p, wr.r2, 0, wr.r2); 2066 UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); 2067 uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); 2068 UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); 2069 2070 #ifdef CONFIG_SMP 2071 uasm_l_smp_pgtable_change(l, *p); 2072 #endif 2073 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ 2074 if (!m4kc_tlbp_war()) { 2075 build_tlb_probe_entry(p); 2076 if (cpu_has_tlbex_tlbp_race()) { 2077 /* race condition happens, leaving */ 2078 uasm_i_ehb(p); 2079 uasm_i_mfc0(p, wr.r3, C0_INDEX); 2080 uasm_il_bltz(p, r, wr.r3, label_leave); 2081 uasm_i_nop(p); 2082 } 2083 } 2084 return wr; 2085 } 2086 2087 static void 2088 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, 2089 struct uasm_reloc **r, unsigned int tmp, 2090 unsigned int ptr) 2091 { 2092 uasm_i_ori(p, ptr, ptr, sizeof(pte_t)); 2093 uasm_i_xori(p, ptr, ptr, sizeof(pte_t)); 2094 build_update_entries(p, tmp, ptr); 2095 build_tlb_write_entry(p, l, r, tlb_indexed); 2096 uasm_l_leave(l, *p); 2097 build_restore_work_registers(p); 2098 uasm_i_eret(p); /* return from trap */ 2099 2100 #ifdef CONFIG_64BIT 2101 build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill); 2102 #endif 2103 } 2104 2105 static void build_r4000_tlb_load_handler(void) 2106 { 2107 u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl); 2108 struct uasm_label *l = labels; 2109 struct uasm_reloc *r = relocs; 2110 struct work_registers wr; 2111 2112 memset(p, 0, handle_tlbl_end - (char *)p); 2113 memset(labels, 0, sizeof(labels)); 2114 memset(relocs, 0, sizeof(relocs)); 2115 2116 if (bcm1250_m3_war()) { 2117 unsigned int segbits = 44; 2118 2119 uasm_i_dmfc0(&p, K0, C0_BADVADDR); 2120 uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 2121 uasm_i_xor(&p, K0, K0, K1); 2122 uasm_i_dsrl_safe(&p, K1, K0, 62); 2123 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 2124 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 2125 uasm_i_or(&p, K0, K0, K1); 2126 uasm_il_bnez(&p, &r, K0, label_leave); 2127 /* No need for uasm_i_nop */ 2128 } 2129 2130 wr = build_r4000_tlbchange_handler_head(&p, &l, &r); 2131 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); 2132 if (m4kc_tlbp_war()) 2133 build_tlb_probe_entry(&p); 2134 2135 if (cpu_has_rixi && !cpu_has_rixiex) { 2136 /* 2137 * If the page is not _PAGE_VALID, RI or XI could not 2138 * have triggered it. Skip the expensive test.. 2139 */ 2140 if (use_bbit_insns()) { 2141 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), 2142 label_tlbl_goaround1); 2143 } else { 2144 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); 2145 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1); 2146 } 2147 uasm_i_nop(&p); 2148 2149 /* 2150 * Warn if something may race with us & replace the TLB entry 2151 * before we read it here. Everything with such races should 2152 * also have dedicated RiXi exception handlers, so this 2153 * shouldn't be hit. 2154 */ 2155 WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path"); 2156 2157 uasm_i_tlbr(&p); 2158 2159 switch (current_cpu_type()) { 2160 default: 2161 if (cpu_has_mips_r2_exec_hazard) { 2162 uasm_i_ehb(&p); 2163 2164 case CPU_CAVIUM_OCTEON: 2165 case CPU_CAVIUM_OCTEON_PLUS: 2166 case CPU_CAVIUM_OCTEON2: 2167 break; 2168 } 2169 } 2170 2171 /* Examine entrylo 0 or 1 based on ptr. */ 2172 if (use_bbit_insns()) { 2173 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); 2174 } else { 2175 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); 2176 uasm_i_beqz(&p, wr.r3, 8); 2177 } 2178 /* load it in the delay slot*/ 2179 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); 2180 /* load it if ptr is odd */ 2181 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); 2182 /* 2183 * If the entryLo (now in wr.r3) is valid (bit 1), RI or 2184 * XI must have triggered it. 2185 */ 2186 if (use_bbit_insns()) { 2187 uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl); 2188 uasm_i_nop(&p); 2189 uasm_l_tlbl_goaround1(&l, p); 2190 } else { 2191 uasm_i_andi(&p, wr.r3, wr.r3, 2); 2192 uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl); 2193 uasm_i_nop(&p); 2194 } 2195 uasm_l_tlbl_goaround1(&l, p); 2196 } 2197 build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3); 2198 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); 2199 2200 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 2201 /* 2202 * This is the entry point when build_r4000_tlbchange_handler_head 2203 * spots a huge page. 2204 */ 2205 uasm_l_tlb_huge_update(&l, p); 2206 iPTE_LW(&p, wr.r1, wr.r2); 2207 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); 2208 build_tlb_probe_entry(&p); 2209 2210 if (cpu_has_rixi && !cpu_has_rixiex) { 2211 /* 2212 * If the page is not _PAGE_VALID, RI or XI could not 2213 * have triggered it. Skip the expensive test.. 2214 */ 2215 if (use_bbit_insns()) { 2216 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), 2217 label_tlbl_goaround2); 2218 } else { 2219 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); 2220 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); 2221 } 2222 uasm_i_nop(&p); 2223 2224 /* 2225 * Warn if something may race with us & replace the TLB entry 2226 * before we read it here. Everything with such races should 2227 * also have dedicated RiXi exception handlers, so this 2228 * shouldn't be hit. 2229 */ 2230 WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path"); 2231 2232 uasm_i_tlbr(&p); 2233 2234 switch (current_cpu_type()) { 2235 default: 2236 if (cpu_has_mips_r2_exec_hazard) { 2237 uasm_i_ehb(&p); 2238 2239 case CPU_CAVIUM_OCTEON: 2240 case CPU_CAVIUM_OCTEON_PLUS: 2241 case CPU_CAVIUM_OCTEON2: 2242 break; 2243 } 2244 } 2245 2246 /* Examine entrylo 0 or 1 based on ptr. */ 2247 if (use_bbit_insns()) { 2248 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); 2249 } else { 2250 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); 2251 uasm_i_beqz(&p, wr.r3, 8); 2252 } 2253 /* load it in the delay slot*/ 2254 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); 2255 /* load it if ptr is odd */ 2256 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); 2257 /* 2258 * If the entryLo (now in wr.r3) is valid (bit 1), RI or 2259 * XI must have triggered it. 2260 */ 2261 if (use_bbit_insns()) { 2262 uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2); 2263 } else { 2264 uasm_i_andi(&p, wr.r3, wr.r3, 2); 2265 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); 2266 } 2267 if (PM_DEFAULT_MASK == 0) 2268 uasm_i_nop(&p); 2269 /* 2270 * We clobbered C0_PAGEMASK, restore it. On the other branch 2271 * it is restored in build_huge_tlb_write_entry. 2272 */ 2273 build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0); 2274 2275 uasm_l_tlbl_goaround2(&l, p); 2276 } 2277 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); 2278 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); 2279 #endif 2280 2281 uasm_l_nopage_tlbl(&l, p); 2282 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 2283 uasm_i_sync(&p, 0); 2284 build_restore_work_registers(&p); 2285 #ifdef CONFIG_CPU_MICROMIPS 2286 if ((unsigned long)tlb_do_page_fault_0 & 1) { 2287 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0)); 2288 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0)); 2289 uasm_i_jr(&p, K0); 2290 } else 2291 #endif 2292 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 2293 uasm_i_nop(&p); 2294 2295 if (p >= (u32 *)handle_tlbl_end) 2296 panic("TLB load handler fastpath space exceeded"); 2297 2298 uasm_resolve_relocs(relocs, labels); 2299 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", 2300 (unsigned int)(p - (u32 *)handle_tlbl)); 2301 2302 dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_end); 2303 } 2304 2305 static void build_r4000_tlb_store_handler(void) 2306 { 2307 u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs); 2308 struct uasm_label *l = labels; 2309 struct uasm_reloc *r = relocs; 2310 struct work_registers wr; 2311 2312 memset(p, 0, handle_tlbs_end - (char *)p); 2313 memset(labels, 0, sizeof(labels)); 2314 memset(relocs, 0, sizeof(relocs)); 2315 2316 wr = build_r4000_tlbchange_handler_head(&p, &l, &r); 2317 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); 2318 if (m4kc_tlbp_war()) 2319 build_tlb_probe_entry(&p); 2320 build_make_write(&p, &r, wr.r1, wr.r2, wr.r3); 2321 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); 2322 2323 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 2324 /* 2325 * This is the entry point when 2326 * build_r4000_tlbchange_handler_head spots a huge page. 2327 */ 2328 uasm_l_tlb_huge_update(&l, p); 2329 iPTE_LW(&p, wr.r1, wr.r2); 2330 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); 2331 build_tlb_probe_entry(&p); 2332 uasm_i_ori(&p, wr.r1, wr.r1, 2333 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2334 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); 2335 #endif 2336 2337 uasm_l_nopage_tlbs(&l, p); 2338 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 2339 uasm_i_sync(&p, 0); 2340 build_restore_work_registers(&p); 2341 #ifdef CONFIG_CPU_MICROMIPS 2342 if ((unsigned long)tlb_do_page_fault_1 & 1) { 2343 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2344 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2345 uasm_i_jr(&p, K0); 2346 } else 2347 #endif 2348 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 2349 uasm_i_nop(&p); 2350 2351 if (p >= (u32 *)handle_tlbs_end) 2352 panic("TLB store handler fastpath space exceeded"); 2353 2354 uasm_resolve_relocs(relocs, labels); 2355 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", 2356 (unsigned int)(p - (u32 *)handle_tlbs)); 2357 2358 dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_end); 2359 } 2360 2361 static void build_r4000_tlb_modify_handler(void) 2362 { 2363 u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm); 2364 struct uasm_label *l = labels; 2365 struct uasm_reloc *r = relocs; 2366 struct work_registers wr; 2367 2368 memset(p, 0, handle_tlbm_end - (char *)p); 2369 memset(labels, 0, sizeof(labels)); 2370 memset(relocs, 0, sizeof(relocs)); 2371 2372 wr = build_r4000_tlbchange_handler_head(&p, &l, &r); 2373 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); 2374 if (m4kc_tlbp_war()) 2375 build_tlb_probe_entry(&p); 2376 /* Present and writable bits set, set accessed and dirty bits. */ 2377 build_make_write(&p, &r, wr.r1, wr.r2, wr.r3); 2378 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); 2379 2380 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 2381 /* 2382 * This is the entry point when 2383 * build_r4000_tlbchange_handler_head spots a huge page. 2384 */ 2385 uasm_l_tlb_huge_update(&l, p); 2386 iPTE_LW(&p, wr.r1, wr.r2); 2387 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); 2388 build_tlb_probe_entry(&p); 2389 uasm_i_ori(&p, wr.r1, wr.r1, 2390 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2391 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0); 2392 #endif 2393 2394 uasm_l_nopage_tlbm(&l, p); 2395 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 2396 uasm_i_sync(&p, 0); 2397 build_restore_work_registers(&p); 2398 #ifdef CONFIG_CPU_MICROMIPS 2399 if ((unsigned long)tlb_do_page_fault_1 & 1) { 2400 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2401 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2402 uasm_i_jr(&p, K0); 2403 } else 2404 #endif 2405 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 2406 uasm_i_nop(&p); 2407 2408 if (p >= (u32 *)handle_tlbm_end) 2409 panic("TLB modify handler fastpath space exceeded"); 2410 2411 uasm_resolve_relocs(relocs, labels); 2412 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", 2413 (unsigned int)(p - (u32 *)handle_tlbm)); 2414 2415 dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_end); 2416 } 2417 2418 static void flush_tlb_handlers(void) 2419 { 2420 local_flush_icache_range((unsigned long)handle_tlbl, 2421 (unsigned long)handle_tlbl_end); 2422 local_flush_icache_range((unsigned long)handle_tlbs, 2423 (unsigned long)handle_tlbs_end); 2424 local_flush_icache_range((unsigned long)handle_tlbm, 2425 (unsigned long)handle_tlbm_end); 2426 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd, 2427 (unsigned long)tlbmiss_handler_setup_pgd_end); 2428 } 2429 2430 static void print_htw_config(void) 2431 { 2432 unsigned long config; 2433 unsigned int pwctl; 2434 const int field = 2 * sizeof(unsigned long); 2435 2436 config = read_c0_pwfield(); 2437 pr_debug("PWField (0x%0*lx): GDI: 0x%02lx UDI: 0x%02lx MDI: 0x%02lx PTI: 0x%02lx PTEI: 0x%02lx\n", 2438 field, config, 2439 (config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT, 2440 (config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT, 2441 (config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT, 2442 (config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT, 2443 (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT); 2444 2445 config = read_c0_pwsize(); 2446 pr_debug("PWSize (0x%0*lx): PS: 0x%lx GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n", 2447 field, config, 2448 (config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT, 2449 (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT, 2450 (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT, 2451 (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT, 2452 (config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT, 2453 (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT); 2454 2455 pwctl = read_c0_pwctl(); 2456 pr_debug("PWCtl (0x%x): PWEn: 0x%x XK: 0x%x XS: 0x%x XU: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n", 2457 pwctl, 2458 (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT, 2459 (pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT, 2460 (pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT, 2461 (pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT, 2462 (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT, 2463 (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT, 2464 (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT); 2465 } 2466 2467 static void config_htw_params(void) 2468 { 2469 unsigned long pwfield, pwsize, ptei; 2470 unsigned int config; 2471 2472 /* 2473 * We are using 2-level page tables, so we only need to 2474 * setup GDW and PTW appropriately. UDW and MDW will remain 0. 2475 * The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to 2476 * write values less than 0xc in these fields because the entire 2477 * write will be dropped. As a result of which, we must preserve 2478 * the original reset values and overwrite only what we really want. 2479 */ 2480 2481 pwfield = read_c0_pwfield(); 2482 /* re-initialize the GDI field */ 2483 pwfield &= ~MIPS_PWFIELD_GDI_MASK; 2484 pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT; 2485 /* re-initialize the PTI field including the even/odd bit */ 2486 pwfield &= ~MIPS_PWFIELD_PTI_MASK; 2487 pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT; 2488 if (CONFIG_PGTABLE_LEVELS >= 3) { 2489 pwfield &= ~MIPS_PWFIELD_MDI_MASK; 2490 pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT; 2491 } 2492 /* Set the PTEI right shift */ 2493 ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT; 2494 pwfield |= ptei; 2495 write_c0_pwfield(pwfield); 2496 /* Check whether the PTEI value is supported */ 2497 back_to_back_c0_hazard(); 2498 pwfield = read_c0_pwfield(); 2499 if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT) 2500 != ptei) { 2501 pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled", 2502 ptei); 2503 /* 2504 * Drop option to avoid HTW being enabled via another path 2505 * (eg htw_reset()) 2506 */ 2507 current_cpu_data.options &= ~MIPS_CPU_HTW; 2508 return; 2509 } 2510 2511 pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT; 2512 pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT; 2513 if (CONFIG_PGTABLE_LEVELS >= 3) 2514 pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT; 2515 2516 /* Set pointer size to size of directory pointers */ 2517 if (IS_ENABLED(CONFIG_64BIT)) 2518 pwsize |= MIPS_PWSIZE_PS_MASK; 2519 /* PTEs may be multiple pointers long (e.g. with XPA) */ 2520 pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT) 2521 & MIPS_PWSIZE_PTEW_MASK; 2522 2523 write_c0_pwsize(pwsize); 2524 2525 /* Make sure everything is set before we enable the HTW */ 2526 back_to_back_c0_hazard(); 2527 2528 /* 2529 * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of 2530 * the pwctl fields. 2531 */ 2532 config = 1 << MIPS_PWCTL_PWEN_SHIFT; 2533 if (IS_ENABLED(CONFIG_64BIT)) 2534 config |= MIPS_PWCTL_XU_MASK; 2535 write_c0_pwctl(config); 2536 pr_info("Hardware Page Table Walker enabled\n"); 2537 2538 print_htw_config(); 2539 } 2540 2541 static void config_xpa_params(void) 2542 { 2543 #ifdef CONFIG_XPA 2544 unsigned int pagegrain; 2545 2546 if (mips_xpa_disabled) { 2547 pr_info("Extended Physical Addressing (XPA) disabled\n"); 2548 return; 2549 } 2550 2551 pagegrain = read_c0_pagegrain(); 2552 write_c0_pagegrain(pagegrain | PG_ELPA); 2553 back_to_back_c0_hazard(); 2554 pagegrain = read_c0_pagegrain(); 2555 2556 if (pagegrain & PG_ELPA) 2557 pr_info("Extended Physical Addressing (XPA) enabled\n"); 2558 else 2559 panic("Extended Physical Addressing (XPA) disabled"); 2560 #endif 2561 } 2562 2563 static void check_pabits(void) 2564 { 2565 unsigned long entry; 2566 unsigned pabits, fillbits; 2567 2568 if (!cpu_has_rixi || !_PAGE_NO_EXEC) { 2569 /* 2570 * We'll only be making use of the fact that we can rotate bits 2571 * into the fill if the CPU supports RIXI, so don't bother 2572 * probing this for CPUs which don't. 2573 */ 2574 return; 2575 } 2576 2577 write_c0_entrylo0(~0ul); 2578 back_to_back_c0_hazard(); 2579 entry = read_c0_entrylo0(); 2580 2581 /* clear all non-PFN bits */ 2582 entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1); 2583 entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI); 2584 2585 /* find a lower bound on PABITS, and upper bound on fill bits */ 2586 pabits = fls_long(entry) + 6; 2587 fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0); 2588 2589 /* minus the RI & XI bits */ 2590 fillbits -= min_t(unsigned, fillbits, 2); 2591 2592 if (fillbits >= ilog2(_PAGE_NO_EXEC)) 2593 fill_includes_sw_bits = true; 2594 2595 pr_debug("Entry* registers contain %u fill bits\n", fillbits); 2596 } 2597 2598 void build_tlb_refill_handler(void) 2599 { 2600 /* 2601 * The refill handler is generated per-CPU, multi-node systems 2602 * may have local storage for it. The other handlers are only 2603 * needed once. 2604 */ 2605 static int run_once = 0; 2606 2607 if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi) 2608 panic("Kernels supporting XPA currently require CPUs with RIXI"); 2609 2610 output_pgtable_bits_defines(); 2611 check_pabits(); 2612 2613 #ifdef CONFIG_64BIT 2614 check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); 2615 #endif 2616 2617 if (cpu_has_3kex) { 2618 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 2619 if (!run_once) { 2620 build_setup_pgd(); 2621 build_r3000_tlb_refill_handler(); 2622 build_r3000_tlb_load_handler(); 2623 build_r3000_tlb_store_handler(); 2624 build_r3000_tlb_modify_handler(); 2625 flush_tlb_handlers(); 2626 run_once++; 2627 } 2628 #else 2629 panic("No R3000 TLB refill handler"); 2630 #endif 2631 return; 2632 } 2633 2634 if (cpu_has_ldpte) 2635 setup_pw(); 2636 2637 if (!run_once) { 2638 scratch_reg = allocate_kscratch(); 2639 build_setup_pgd(); 2640 build_r4000_tlb_load_handler(); 2641 build_r4000_tlb_store_handler(); 2642 build_r4000_tlb_modify_handler(); 2643 if (cpu_has_ldpte) 2644 build_loongson3_tlb_refill_handler(); 2645 else 2646 build_r4000_tlb_refill_handler(); 2647 flush_tlb_handlers(); 2648 run_once++; 2649 } 2650 if (cpu_has_xpa) 2651 config_xpa_params(); 2652 if (cpu_has_htw) 2653 config_htw_params(); 2654 } 2655