/*
 * trampoline.S: Jump start slave processors on sparc64.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */


#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>

	.data
	.align	8
call_method:
	.asciz	"call-method"
	.align	8
itlb_load:
	.asciz	"SUNW,itlb-load"
	.align	8
dtlb_load:
	.asciz	"SUNW,dtlb-load"

#define TRAMP_STACK_SIZE	1024
	.align	16
tramp_stack:
	.skip	TRAMP_STACK_SIZE

	.align	8
	.globl	sparc64_cpu_startup, sparc64_cpu_startup_end
sparc64_cpu_startup:
	BRANCH_IF_SUN4V(g1, niagara_startup)
	BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)

	ba,pt	%xcc, spitfire_startup
	 nop

cheetah_plus_startup:
	/* Preserve OBP chosen DCU and DCR register settings. */
	ba,pt	%xcc, cheetah_generic_startup
	 nop

cheetah_startup:
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18

	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	or	%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	sllx	%g5, 32, %g5
	or	%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
	stxa	%g5, [%g0] ASI_DCU_CONTROL_REG
	membar	#Sync
	/* fallthru */

cheetah_generic_startup:
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync

	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync
	/* fallthru */

niagara_startup:
	/* Disable STICK_INT interrupts. */
	sethi	%hi(0x80000000), %g5
	sllx	%g5, 32, %g5
	wr	%g5, %asr25

	ba,pt	%xcc, startup_continue
	 nop

spitfire_startup:
	mov	(LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
	stxa	%g1, [%g0] ASI_LSU_CONTROL
	membar	#Sync

startup_continue:
	mov	%o0, %l0	! save incoming arg, dereferenced later for this cpu's thread_info
	BRANCH_IF_SUN4V(g1, niagara_lock_tlb)

	sethi	%hi(0x80000000), %g2
	sllx	%g2, 32, %g2
	wr	%g2, 0, %tick_cmpr

	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
	 * We lock 'num_kernel_image_mappings' consecutive entries.
	 */
	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
	brnz,pn	%g1, 1b
	 nop

	/* Get onto temporary stack which will be in the locked
	 * kernel image.
	 */
	sethi	%hi(tramp_stack), %g1
	or	%g1, %lo(tramp_stack), %g1
	add	%g1, TRAMP_STACK_SIZE, %g1
	sub	%g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
	flushw
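
	/* Note on the sequences below: each iteration builds a
	 * "call-method" argument block on the temporary stack at
	 * %sp + 2047 + 128 (2047 is the 64-bit stack bias, 128 skips
	 * the register window save area).  The block lists the service
	 * name, the number of input arguments, the number of return
	 * values, and then the arguments themselves: the method name,
	 * the MMU ihandle, the virtual address, the TTE data and the
	 * TLB entry index.  The PROM call entry point cached in
	 * p1275buf is then invoked with the block's address in %o0.
	 */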

	/* Setup the loop variables:
	 * %l3: VADDR base
	 * %l4: TTE base
	 * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
	 * %l6: Number of TTE entries to map
	 * %l7: Highest TTE entry number, we count down
	 */
	sethi	%hi(KERNBASE), %l3
	sethi	%hi(kern_locked_tte_data), %l4
	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
	clr	%l5
	sethi	%hi(num_kernel_image_mappings), %l6
	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6

	mov	15, %l7
	BRANCH_IF_ANY_CHEETAH(g1,g5,2f)

	mov	63, %l7
2:

3:
	/* Lock into I-MMU */
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(itlb_load), %g2
	or	%g2, %lo(itlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]

	/* Each TTE maps 4MB, convert index to offset. */
	sllx	%l5, 22, %g1

	add	%l3, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
	add	%l4, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE

	/* TTE index is highest minus loop index. */
	sub	%l7, %l5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x38]

	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	/* Lock into D-MMU */
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(dtlb_load), %g2
	or	%g2, %lo(dtlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]

	/* Each TTE maps 4MB, convert index to offset. */
	sllx	%l5, 22, %g1

	add	%l3, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
	add	%l4, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE

	/* TTE index is highest minus loop index. */
	sub	%l7, %l5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x38]

	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	add	%l5, 1, %l5
	cmp	%l5, %l6
	bne,pt	%xcc, 3b
	 nop

	sethi	%hi(prom_entry_lock), %g2
	stb	%g0, [%g2 + %lo(prom_entry_lock)]

	ba,pt	%xcc, after_lock_tlb
	 nop
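
	/* On sun4v there is no firmware call here; the loop below
	 * instead uses the mmu_map_perm_addr hypervisor fast trap to
	 * install each 4MB mapping permanently: %o5 carries the
	 * function number, %o0 the virtual address, %o1 must be zero,
	 * %o2 the TTE data, and %o3 selects the I- or D-MMU.
	 */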
niagara_lock_tlb:
	sethi	%hi(KERNBASE), %l3
	sethi	%hi(kern_locked_tte_data), %l4
	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
	clr	%l5
	sethi	%hi(num_kernel_image_mappings), %l6
	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6

1:
	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sllx	%l5, 22, %g2
	add	%l3, %g2, %o0
	clr	%o1
	add	%l4, %g2, %o2
	mov	HV_MMU_IMMU, %o3
	ta	HV_FAST_TRAP

	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sllx	%l5, 22, %g2
	add	%l3, %g2, %o0
	clr	%o1
	add	%l4, %g2, %o2
	mov	HV_MMU_DMMU, %o3
	ta	HV_FAST_TRAP

	add	%l5, 1, %l5
	cmp	%l5, %l6
	bne,pt	%xcc, 1b
	 nop

after_lock_tlb:
	wrpr	%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
	wr	%g0, 0, %fprs

	wr	%g0, ASI_P, %asi

	mov	PRIMARY_CONTEXT, %g7

661:	stxa	%g0, [%g7] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g0, [%g7] ASI_MMU
	.previous

	membar	#Sync
	mov	SECONDARY_CONTEXT, %g7

661:	stxa	%g0, [%g7] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g0, [%g7] ASI_MMU
	.previous

	membar	#Sync

	/* Everything we do here, until we properly take over the
	 * trap table, must be done with extreme care. We cannot
	 * make any references to %g6 (current thread pointer),
	 * %g4 (current task pointer), or %g5 (base of current cpu's
	 * per-cpu area) until we properly take over the trap table
	 * from the firmware and hypervisor.
	 *
	 * Get onto temporary stack which is in the locked kernel image.
	 */
	sethi	%hi(tramp_stack), %g1
	or	%g1, %lo(tramp_stack), %g1
	add	%g1, TRAMP_STACK_SIZE, %g1
	sub	%g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
	mov	0, %fp

	/* Put garbage in these registers to trap any access to them. */
	set	0xdeadbeef, %g4
	set	0xdeadbeef, %g5
	set	0xdeadbeef, %g6

	call	init_irqwork_curcpu
	 nop

	sethi	%hi(tlb_type), %g3
	lduw	[%g3 + %lo(tlb_type)], %g2
	cmp	%g2, 3		! tlb_type == hypervisor?
	bne,pt	%icc, 1f
	 nop

	call	hard_smp_processor_id
	 nop

	call	sun4v_register_mondo_queues
	 nop

1:	call	init_cur_cpu_trap
	 ldx	[%l0], %o0

	/* Start using proper page size encodings in ctx register. */
	sethi	%hi(sparc64_kern_pri_context), %g3
	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
	mov	PRIMARY_CONTEXT, %g1

661:	stxa	%g2, [%g1] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g2, [%g1] ASI_MMU
	.previous

	membar	#Sync

	wrpr	%g0, 0, %wstate

	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
	brnz,pn	%g1, 1b
	 nop

	/* As a hack, put &init_thread_union into %g6.
	 * prom_world() loads from here to restore the %asi
	 * register.
	 */
	sethi	%hi(init_thread_union), %g6
	or	%g6, %lo(init_thread_union), %g6

	sethi	%hi(is_sun4v), %o0
	lduw	[%o0 + %lo(is_sun4v)], %o0
	brz,pt	%o0, 2f
	 nop
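
	/* sun4v case: in addition to the trap table address, the
	 * firmware service named by prom_set_trap_table_name is handed
	 * the physical address of this cpu's MMU fault status area
	 * (hence two input arguments below, versus one in the sun4u
	 * path at label 2).
	 */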
335 */ 336 sethi %hi(init_thread_union), %g6 337 or %g6, %lo(init_thread_union), %g6 338 339 sethi %hi(is_sun4v), %o0 340 lduw [%o0 + %lo(is_sun4v)], %o0 341 brz,pt %o0, 2f 342 nop 343 344 TRAP_LOAD_TRAP_BLOCK(%g2, %g3) 345 add %g2, TRAP_PER_CPU_FAULT_INFO, %g2 346 stxa %g2, [%g0] ASI_SCRATCHPAD 347 348 /* Compute physical address: 349 * 350 * paddr = kern_base + (mmfsa_vaddr - KERNBASE) 351 */ 352 sethi %hi(KERNBASE), %g3 353 sub %g2, %g3, %g2 354 sethi %hi(kern_base), %g3 355 ldx [%g3 + %lo(kern_base)], %g3 356 add %g2, %g3, %o1 357 sethi %hi(sparc64_ttable_tl0), %o0 358 359 set prom_set_trap_table_name, %g2 360 stx %g2, [%sp + 2047 + 128 + 0x00] 361 mov 2, %g2 362 stx %g2, [%sp + 2047 + 128 + 0x08] 363 mov 0, %g2 364 stx %g2, [%sp + 2047 + 128 + 0x10] 365 stx %o0, [%sp + 2047 + 128 + 0x18] 366 stx %o1, [%sp + 2047 + 128 + 0x20] 367 sethi %hi(p1275buf), %g2 368 or %g2, %lo(p1275buf), %g2 369 ldx [%g2 + 0x08], %o1 370 call %o1 371 add %sp, (2047 + 128), %o0 372 373 ba,pt %xcc, 3f 374 nop 375 3762: sethi %hi(sparc64_ttable_tl0), %o0 377 set prom_set_trap_table_name, %g2 378 stx %g2, [%sp + 2047 + 128 + 0x00] 379 mov 1, %g2 380 stx %g2, [%sp + 2047 + 128 + 0x08] 381 mov 0, %g2 382 stx %g2, [%sp + 2047 + 128 + 0x10] 383 stx %o0, [%sp + 2047 + 128 + 0x18] 384 sethi %hi(p1275buf), %g2 385 or %g2, %lo(p1275buf), %g2 386 ldx [%g2 + 0x08], %o1 387 call %o1 388 add %sp, (2047 + 128), %o0 389 3903: sethi %hi(prom_entry_lock), %g2 391 stb %g0, [%g2 + %lo(prom_entry_lock)] 392 393 ldx [%l0], %g6 394 ldx [%g6 + TI_TASK], %g4 395 396 mov 1, %g5 397 sllx %g5, THREAD_SHIFT, %g5 398 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 399 add %g6, %g5, %sp 400 401 rdpr %pstate, %o1 402 or %o1, PSTATE_IE, %o1 403 wrpr %o1, 0, %pstate 404 405 call smp_callin 406 nop 407 408 call cpu_panic 409 nop 4101: b,a,pt %xcc, 1b 411 412 .align 8 413sparc64_cpu_startup_end: 414