/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align	32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

kvmap_itlb_nonlinear:
	/* Catch kernel NULL pointer calls.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_itlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_itlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* TSB entry address left in %g1, look up the linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
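
	/* Roughly, in C (a sketch of the lookup below, using this
	 * file's own symbols; "idx" is the 256MB slot of the address
	 * once the PAGE_OFFSET bits have been stripped):
	 *
	 *	idx = (vaddr << 21) >> (21 + 28);
	 *	sel = (kpte_linear_bitmap[idx / 32]
	 *	       >> ((idx % 32) * 2)) & 3;
	 *	pte = vaddr ^ kern_linear_pte_xor[sel];
	 */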
kvmap_dtlb_tsb4m_miss:
	/* Clear the PAGE_OFFSET top virtual bits, shift
	 * down to get PFN, and make sure PFN is in range.
	 */
	sllx		%g4, 21, %g5

	/* Check to see if we know about valid memory at the 4MB
	 * chunk this physical address will reside within.
	 */
	srlx		%g5, 21 + 41, %g2
	brnz,pn		%g2, kvmap_dtlb_longpath
	 nop

	/* This unconditional branch and its delay-slot nop get patched
	 * with the sethi sequence once the bitmap has been set up.
	 */
	.globl		valid_addr_bitmap_insn
valid_addr_bitmap_insn:
	ba,pt		%xcc, 2f
	 nop
	.subsection	2
	.globl		valid_addr_bitmap_patch
valid_addr_bitmap_patch:
	sethi		%hi(sparc64_valid_addr_bitmap), %g7
	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
	.previous

	/* Test the one bit sparc64_valid_addr_bitmap keeps per 4MB chunk. */
	srlx		%g5, 21 + 22, %g2
	srlx		%g2, 6, %g5
	and		%g2, 63, %g2
	sllx		%g5, 3, %g5
	ldx		[%g7 + %g5], %g5
	mov		1, %g7
	sllx		%g7, %g2, %g7
	andcc		%g5, %g7, %g0
	be,pn		%xcc, kvmap_dtlb_longpath

2:	 sethi		%hi(kpte_linear_bitmap), %g2

	/* Get the 256MB physical address index. */
	sllx		%g4, 21, %g5
	or		%g2, %lo(kpte_linear_bitmap), %g2
	srlx		%g5, 21 + 28, %g5
	and		%g5, (32 - 1), %g7

	/* Divide by 32 to get the offset into the bitmask.  */
	srlx		%g5, 5, %g5
	add		%g7, %g7, %g7	! two bits per 256MB slot
	sllx		%g5, 3, %g5

	/* kern_linear_pte_xor[(mask >> shift) & 3] */
	ldx		[%g2 + %g5], %g2
	srlx		%g2, %g7, %g7
	sethi		%hi(kern_linear_pte_xor), %g5
	and		%g7, 3, %g7
	or		%g5, %lo(kern_linear_pte_xor), %g5
	sllx		%g7, 3, %g7
	ldx		[%g5 + %g7], %g2

	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_dtlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3
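
	/* vmemmap_table[] holds one pre-built TTE per 4MB (1 << 22)
	 * chunk of the vmemmap region.  kvmap_vmemmap is entered with
	 * %g5 already holding VMEMMAP_BASE, set up at the branch site
	 * in kvmap_dtlb_nonlinear below.
	 */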
#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	sub		%g4, %g5, %g5
	srlx		%g5, 22, %g5
	sethi		%hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
	or		%g1, %lo(vmemmap_table), %g1
	ba,pt		%xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	mov		(VMEMMAP_BASE >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 mov		(VMALLOC_END >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa	[%g0] ASI_SCRATCHPAD, %g5
	.previous

	/* %tl == 1 means the miss was taken from TL0; a deeper trap
	 * level means it hit inside a window trap handler and must
	 * unwind through winfix_trampoline instead.
	 */
	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop
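
/* A note on the 661: pairs used throughout this file: each entry in the
 * .sun4v_2insn_patch section records the address of a pair of sun4u
 * instructions together with the two instructions that the boot-time
 * patch code writes over them when running on sun4v, so a single image
 * serves both MMU flavors.
 */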