/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align	32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_IMMU, %g4

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

	/* Catch kernel NULL pointer calls. */
	sethi	%hi(PAGE_SIZE), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_itlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
	sethi	%hi(LOW_OBP_ADDRESS), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_itlb_vmalloc_addr
	 mov	0x1, %g5
	sllx	%g5, 32, %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE. */
	ldxa	[%g5] ASI_PHYS_USE_EC, %g5
	mov	1, %g7
	sllx	%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn %g5, kvmap_itlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code. The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt	%xcc, sun4v_itlb_load
	 mov	%g5, %g3

kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt	%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt	%xcc, kvmap_dtlb_load
	 nop

	.align	32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt	%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g4

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	brgez,pn %g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
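
	/* Linear (PAGE_OFFSET) fast path, in outline: the PTE is
	 * synthesized rather than obtained from a page table walk.
	 * Each 256MB chunk of physical memory has a 2-bit index
	 * stored in kpte_linear_bitmap; that index selects one of
	 * four PTE templates in kern_linear_pte_xor[], and XOR-ing
	 * the chosen template with the virtual address produces the
	 * TLB entry handed to kvmap_dtlb_tsb4m_load.
	 */
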
	/* TSB entry address left in %g1, lookup linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
kvmap_dtlb_tsb4m_miss:
	/* Clear the PAGE_OFFSET top virtual bits, shift
	 * down to get PFN, and make sure PFN is in range.
	 */
	sllx	%g4, 21, %g5

	/* Check to see if we know about valid memory at the 4MB
	 * chunk this physical address will reside within.
	 */
	srlx	%g5, 21 + 41, %g2
	brnz,pn	%g2, kvmap_dtlb_longpath
	 nop

	/* This unconditional branch and delay-slot nop gets patched
	 * by the sethi sequence once the bitmap is properly setup.
	 */
	.globl	valid_addr_bitmap_insn
valid_addr_bitmap_insn:
	ba,pt	%xcc, 2f
	 nop
	.subsection	2
	.globl	valid_addr_bitmap_patch
valid_addr_bitmap_patch:
	sethi	%hi(sparc64_valid_addr_bitmap), %g7
	or	%g7, %lo(sparc64_valid_addr_bitmap), %g7
	.previous

	srlx	%g5, 21 + 22, %g2
	srlx	%g2, 6, %g5
	and	%g2, 63, %g2
	sllx	%g5, 3, %g5
	ldx	[%g7 + %g5], %g5
	mov	1, %g7
	sllx	%g7, %g2, %g7
	andcc	%g5, %g7, %g0
	be,pn	%xcc, kvmap_dtlb_longpath

2:	 sethi	%hi(kpte_linear_bitmap), %g2

	/* Get the 256MB physical address index. */
	sllx	%g4, 21, %g5
	or	%g2, %lo(kpte_linear_bitmap), %g2
	srlx	%g5, 21 + 28, %g5
	and	%g5, (32 - 1), %g7

	/* Divide by 32 to get the offset into the bitmask. */
	srlx	%g5, 5, %g5
	add	%g7, %g7, %g7
	sllx	%g5, 3, %g5

	/* kern_linear_pte_xor[((mask >> shift) & 3)] */
	ldx	[%g2 + %g5], %g2
	srlx	%g2, %g7, %g7
	sethi	%hi(kern_linear_pte_xor), %g5
	and	%g7, 3, %g7
	or	%g5, %lo(kern_linear_pte_xor), %g5
	sllx	%g7, 3, %g7
	ldx	[%g5 + %g7], %g2

	.globl	kvmap_linear_patch
kvmap_linear_patch:
	ba,pt	%xcc, kvmap_dtlb_tsb4m_load
	 xor	%g2, %g4, %g5

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE. */
	ldxa	[%g5] ASI_PHYS_USE_EC, %g5
	mov	1, %g7
	sllx	%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn %g5, kvmap_dtlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code. The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt	%xcc, sun4v_dtlb_load
	 mov	%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	sub	%g4, %g5, %g5
	srlx	%g5, 22, %g5
	sethi	%hi(vmemmap_table), %g1
	sllx	%g5, 3, %g5
	or	%g1, %lo(vmemmap_table), %g1
	ba,pt	%xcc, kvmap_dtlb_load
	 ldx	[%g1 + %g5], %g5
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs. */
	sethi	%hi(PAGE_SIZE), %g5
	cmp	%g4, %g5
	bleu,pn	%xcc, kvmap_dtlb_longpath
	 nop

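	/* Dispatch for the remaining non-linear kernel addresses:
	 * vmemmap addresses are translated through vmemmap_table
	 * (see kvmap_vmemmap above); everything else is looked up
	 * in the base page size TSB and, on a miss, classified
	 * below as a vmalloc, OBP or invalid address.
	 */
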
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap. */
	mov	(VMEMMAP_BASE >> 40), %g5
	sllx	%g5, 40, %g5
	cmp	%g4, %g5
	bgeu,pn	%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
	sethi	%hi(MODULES_VADDR), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_dtlb_longpath
	 mov	(VMALLOC_END >> 40), %g5
	sllx	%g5, 40, %g5
	cmp	%g4, %g5
	bgeu,pn	%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
	sethi	%hi(LOW_OBP_ADDRESS), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_dtlb_vmalloc_addr
	 mov	0x1, %g5
	sllx	%g5, 32, %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_dtlb_obp
	 nop
	ba,pt	%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa	[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop