/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

	.text

/*
 * call_do_softirq(sp)
 *
 * Run __do_softirq() on the stack whose base is passed in r3.
 * LR is saved in the caller's standard LR slot, a frame is created at
 * the top of the new stack (stdu also stores the old r1 there as the
 * back chain), r1 is switched over for the call, and the old r1 is
 * recovered through the back-chain word afterwards.
 */
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)	/* save LR in caller's frame */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)	/* new frame; back chain = old r1 */
	mr	r1,r3		/* switch to the new stack */
	bl	__do_softirq
	ld	r1,0(r1)	/* back chain -> original stack pointer */
	ld	r0,16(r1)	/* recover saved LR */
	mtlr	r0
	blr

/*
 * call_do_irq(arg, sp)
 *
 * Same stack-switch dance as call_do_softirq, but the new stack base
 * comes in r4; r3 (the first C argument) is left untouched and passed
 * straight through to __do_irq().
 */
_GLOBAL(call_do_irq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4		/* switch to the irq stack */
	bl	__do_irq
	ld	r1,0(r1)	/* back chain -> original stack pointer */
	ld	r0,16(r1)
	mtlr	r0
	blr

/*
 * __bswapdi2(x): byte-reverse the 64-bit value in r3 (libgcc helper,
 * exported for modules).  Each 32-bit half is byte-swapped with the
 * classic rlwinm/rlwimi sequence and the two halves are exchanged:
 * the swapped low word (r7) becomes the high half of the result.
 */
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32		/* r8 = high 32 bits of input */
	rlwinm	r7,r3,8,0xffffffff	/* start byte-swap of low word */
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff	/* start byte-swap of high word */
	rlwimi	r7,r3,24,16,23
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32		/* swapped low word -> high half */
	or	r3,r7,r9		/* combine halves */
	blr


#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
/*
 * rmci_on / rmci_off
 *
 * Set / clear bit 0x100 of the upper word of HID4 (presumably the
 * real-mode cache-inhibit control on 970-class CPUs -- confirm against
 * the 970 HID4 documentation), bracketed by the sync/isync and slbia
 * sequence required around HID4 updates.  Clobbers r3 and r5.
 */
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0	/* rotate the bit into the upper 32 bits */
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3	/* set the bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0	/* same bit as rmci_on */
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3	/* clear the bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7			/* save current MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with data relocation off */
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4		/* save HID4 for restore */
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100		/* set 0x100 in the upper word (same
					 * bit rmci_on/rmci_off toggle) */
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)		/* the actual real-mode byte load */
	sync
	mtspr	SPRN_HID4,r6		/* restore HID4 */
	isync
	slbia
	isync
	mtmsrd	r7			/* restore MSR (data relocation back on) */
	sync
	isync
	blr

	/*
	 * Do an IO access in real mode
	 * (store variant: value in r3, real address in r4)
	 */
_GLOBAL(real_writeb)
	mfmsr	r7			/* save current MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with data relocation off */
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4		/* save HID4 for restore */
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100		/* same HID4 bit as real_readb */
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)		/* the actual real-mode byte store */
	sync
	mtspr	SPRN_HID4,r6		/* restore HID4 */
	isync
	slbia
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI

/*
 * Real-mode byte accessors for PA Semi parts: same MSR_DR off/on
 * bracket as above, but the access itself uses the cache-inhibited
 * lbzcix/stbcix instructions instead of touching HID4.
 */
_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* clear data relocation */
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(R3,R0,R3)		/* cache-inhibited load from real addr r3 */
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* clear data relocation */
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(R3,R0,R4)		/* cache-inhibited store of r3 to real addr r4 */
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */


#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24 bits register address. This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
208 */ 209_GLOBAL(scom970_read) 210 /* interrupts off */ 211 mfmsr r4 212 ori r0,r4,MSR_EE 213 xori r0,r0,MSR_EE 214 mtmsrd r0,1 215 216 /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits 217 * (including parity). On current CPUs they must be 0'd, 218 * and finally or in RW bit 219 */ 220 rlwinm r3,r3,8,0,15 221 ori r3,r3,0x8000 222 223 /* do the actual scom read */ 224 sync 225 mtspr SPRN_SCOMC,r3 226 isync 227 mfspr r3,SPRN_SCOMD 228 isync 229 mfspr r0,SPRN_SCOMC 230 isync 231 232 /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah 233 * that's the best we can do). Not implemented yet as we don't use 234 * the scom on any of the bogus CPUs yet, but may have to be done 235 * ultimately 236 */ 237 238 /* restore interrupts */ 239 mtmsrd r4,1 240 blr 241 242 243_GLOBAL(scom970_write) 244 /* interrupts off */ 245 mfmsr r5 246 ori r0,r5,MSR_EE 247 xori r0,r0,MSR_EE 248 mtmsrd r0,1 249 250 /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits 251 * (including parity). On current CPUs they must be 0'd. 252 */ 253 254 rlwinm r3,r3,8,0,15 255 256 sync 257 mtspr SPRN_SCOMD,r4 /* write data */ 258 isync 259 mtspr SPRN_SCOMC,r3 /* write command */ 260 isync 261 mfspr 3,SPRN_SCOMC 262 isync 263 264 /* restore interrupts */ 265 mtmsrd r5,1 266 blr 267#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */ 268 269/* kexec_wait(phys_cpu) 270 * 271 * wait for the flag to change, indicating this kernel is going away but 272 * the slave code for the next one is at addresses 0 to 100. 273 * 274 * This is used by all slaves, even those that did not find a matching 275 * paca in the secondary startup code. 276 * 277 * Physical (hardware) cpu id should be in r3. 
 */
_GLOBAL(kexec_wait)
	bl	1f			/* bl/mflr trick to get our own address */
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b	/* r5 = &kexec_flag, position-independent */

99:	HMT_LOW				/* drop SMT priority while spinning */
#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b			/* spin until kexec_flag goes non-zero */
#ifdef CONFIG_PPC_BOOK3S_64
	li	r10,0x60		/* slave entry point of the new kernel */
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid				/* branch to 0x60 with the adjusted MSR */
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60			/* absolute branch to the slave entry */
#endif
#endif

/* this can be in text because we won't change it until we are
 * running in real anyways
 */
kexec_flag:
	.long	0


#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC_BOOK3E
/*
 * BOOK3E has no real MMU mode, so we have to setup the initial TLB
 * for a core to identity map v:0 to p:0. This current implementation
 * assumes that 1G is enough for kexec.
 */
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1	/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r9) */

/* Set up a temp identity mapping v:0 to p:0 and return to it.
 */
	mtspr	SPRN_MAS0,r9

	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9		/* valid, protected, 1G page size */

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS2_M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9		/* effective address 0 */

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9		/* real address 0, supervisor R/W/X */
	li	r9,0
	mtspr	SPRN_MAS7,r9		/* upper real-address bits = 0 */

	tlbwe				/* write the entry */
	isync
	blr
#endif

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)	/* r3 = our hardware cpu id (live below) */
	bl	real_mode		/* MMU off (identity TLB on Book3E) */

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)	/* advertise this paca is done */

	b	kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E
	/* Create an identity mapping. */
	b	kexec_create_tlb	/* tail call; its blr returns to our caller */
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11			/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9		/* current MSR with RI cleared */
	andc	r10,r12,r10		/* current MSR with DR/IR cleared */

	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid				/* "return" to caller with MMU off */
#endif

/*
 * kexec_sequence(newstack, start, image, control, clear_all(),
 *		  copy_with_mmu_off)
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */

_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)		/* save LR in caller's frame */

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)		/* terminate the LR chain on the new stack */

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
417 */ 418 std r31,-8(r1) 419 std r30,-16(r1) 420 std r29,-24(r1) 421 std r28,-32(r1) 422 std r27,-40(r1) 423 std r26,-48(r1) 424 std r25,-56(r1) 425 426 stdu r1,-STACK_FRAME_OVERHEAD-64(r1) 427 428 /* save args into preserved regs */ 429 mr r31,r3 /* newstack (both) */ 430 mr r30,r4 /* start (real) */ 431 mr r29,r5 /* image (virt) */ 432 mr r28,r6 /* control, unused */ 433 mr r27,r7 /* clear_all() fn desc */ 434 mr r26,r8 /* copy_with_mmu_off */ 435 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ 436 437 /* disable interrupts, we are overwriting kernel data next */ 438#ifdef CONFIG_PPC_BOOK3E 439 wrteei 0 440#else 441 mfmsr r3 442 rlwinm r3,r3,0,17,15 443 mtmsrd r3,1 444#endif 445 446 /* We need to turn the MMU off unless we are in hash mode 447 * under a hypervisor 448 */ 449 cmpdi r26,0 450 beq 1f 451 bl real_mode 4521: 453 /* copy dest pages, flush whole dest image */ 454 mr r3,r29 455 bl kexec_copy_flush /* (image) */ 456 457 /* turn off mmu now if not done earlier */ 458 cmpdi r26,0 459 bne 1f 460 bl real_mode 461 462 /* copy 0x100 bytes starting at start to 0 */ 4631: li r3,0 464 mr r4,r30 /* start, aka phys mem offset */ 465 li r5,0x100 466 li r6,0 467 bl copy_and_flush /* (dest, src, copy limit, start offset) */ 4681: /* assume normal blr return */ 469 470 /* release other cpus to the new kernel secondary start at 0x60 */ 471 mflr r5 472 li r6,1 473 stw r6,kexec_flag-1b(5) 474 475 cmpdi r27,0 476 beq 1f 477 478 /* clear out hardware hash page table and tlb */ 479#ifdef PPC64_ELF_ABI_v1 480 ld r12,0(r27) /* deref function descriptor */ 481#else 482 mr r12,r27 483#endif 484 mtctr r12 485 bctrl /* mmu_hash_ops.hpte_clear_all(void); */ 486 487/* 488 * kexec image calling is: 489 * the first 0x100 bytes of the entry point are copied to 0 490 * 491 * all slaves branch to slave = 0x60 (absolute) 492 * slave(phys_cpu_id); 493 * 494 * master goes to start = entry point 495 * start(phys_cpu_id, start, 0); 496 * 497 * 498 * a wrapper is needed to call 
existing kernels, here is an approximate 499 * description of one method: 500 * 501 * v2: (2.6.10) 502 * start will be near the boot_block (maybe 0x100 bytes before it?) 503 * it will have a 0x60, which will b to boot_block, where it will wait 504 * and 0 will store phys into struct boot-block and load r3 from there, 505 * copy kernel 0-0x100 and tell slaves to back down to 0x60 again 506 * 507 * v1: (2.6.9) 508 * boot block will have all cpus scanning device tree to see if they 509 * are the boot cpu ????? 510 * other device tree differences (prop sizes, va vs pa, etc)... 511 */ 5121: mr r3,r25 # my phys cpu 513 mr r4,r30 # start, aka phys mem offset 514 mtlr 4 515 li r5,0 516 blr /* image->start(physid, image->start, 0); */ 517#endif /* CONFIG_KEXEC_CORE */ 518