/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec trampoline
 *
 * Based on code taken from kexec-tools and kexec-lite.
 *
 * Copyright (C) 2004 - 2005, Milton D Miller II, IBM Corporation
 * Copyright (C) 2006, Mohan Kumar M, IBM Corporation
 * Copyright (C) 2013, Anton Blanchard, IBM Corporation
 */

#include <asm/asm-compat.h>
#include <asm/crashdump-ppc64.h>

	.machine ppc64
	.balign 256
	.globl purgatory_start
purgatory_start:
	b	master

	/* ABI: possible run_at_load flag at 0x5c */
	.org purgatory_start + 0x5c
	.globl run_at_load
run_at_load:
	.long 0
	.size run_at_load, . - run_at_load

	/* ABI: slaves start at 60 with r3=phys */
	.org purgatory_start + 0x60
slave:
	b .
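	/*
	 * Secondary cpus entering at 0x60 spin here.  In practice this
	 * region is replaced with the next image's first 0x100 bytes
	 * (see the comment below), so they spin in that code instead.
	 */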
	/* ABI: end of copied region */
	.org purgatory_start + 0x100
	.size purgatory_start, . - purgatory_start

/*
 * The above 0x100 bytes at purgatory_start are replaced with the
 * code from the kernel (or next stage) by setup_purgatory().
 */

master:
	or	%r1,%r1,%r1	/* low priority to let other threads catch up */
	isync
	mr	%r17,%r3	/* save cpu id to r17 */
	mr	%r15,%r4	/* save physical address in reg15 */

	/* Work out where we're running */
	bcl	20, 31, 0f
0:	mflr	%r18
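	/*
	 * bcl 20,31,0f is an unconditional "branch and link" form that
	 * avoids polluting the link stack predictor; r18 now holds the
	 * runtime address of label 0 and is used for the PC-relative
	 * loads below.
	 */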

	/*
	 * Copy BACKUP_SRC_SIZE bytes from BACKUP_SRC_START to
	 * backup_start 8 bytes at a time.
	 *
	 * Use r3 = dest, r4 = src, r5 = size, r6 = count
	 */
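	/*
	 * This backs up the first BACKUP_SRC_SIZE bytes of the old
	 * kernel's memory for the crash-dump case; when no backup region
	 * was set up, backup_start is zero and the copy is skipped.
	 */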
	ld	%r3, (backup_start - 0b)(%r18)
	cmpdi	%cr0, %r3, 0
	beq	.Lskip_copy	/* skip if there is no backup region */
	lis	%r5, BACKUP_SRC_SIZE@h
	ori	%r5, %r5, BACKUP_SRC_SIZE@l
	cmpdi	%cr0, %r5, 0
	beq	.Lskip_copy	/* skip if copy size is zero */
	lis	%r4, BACKUP_SRC_START@h
	ori	%r4, %r4, BACKUP_SRC_START@l
	li	%r6, 0
.Lcopy_loop:
	ldx	%r0, %r6, %r4
	stdx	%r0, %r6, %r3
	addi	%r6, %r6, 8
	cmpld	%cr0, %r6, %r5
	blt	.Lcopy_loop

.Lskip_copy:
	or	%r3,%r3,%r3	/* ok now to high priority, let's boot */
	lis	%r6,0x1
	mtctr	%r6		/* delay a bit for slaves to catch up */
	bdnz	.		/* before we overwrite 0-100 again */

	/* load device-tree address */
	ld	%r3, (dt_offset - 0b)(%r18)
	mr	%r16,%r3	/* save dt address in reg16 */
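	/*
	 * Record this cpu as the boot cpu in the flattened device tree:
	 * offset 20 of the FDT header is the version field and offset 28
	 * is boot_cpuid_phys, which only exists from version 2 onwards,
	 * hence the version check.
	 */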
	li	%r4,20
	LWZX_BE	%r6,%r3,%r4	/* fetch __be32 version number at byte 20 */
	cmpwi	%cr0,%r6,2	/* v2 or later? */
	blt	1f
	li	%r4,28
	STWX_BE	%r17,%r3,%r4	/* Store my cpu as __be32 at byte 28 */
1:
	/* Load opal base and entry values in r8 & r9 respectively */
	ld	%r8,(opal_base - 0b)(%r18)
	ld	%r9,(opal_entry - 0b)(%r18)
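	/*
	 * These are passed through untouched so the next kernel, if it
	 * boots bare-metal via OPAL, can find the firmware entry points.
	 */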

	/* load the kernel address */
	ld	%r4,(kernel - 0b)(%r18)

	/* load the run_at_load flag */
	/* possibly patched by kexec */
	ld	%r6,(run_at_load - 0b)(%r18)
	/* and patch it into the kernel */
	stw	%r6,(0x5c)(%r4)
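	/*
	 * The next kernel keeps its copy of the flag at the same 0x5c
	 * offset as the run_at_load slot defined above.
	 */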

	mr	%r3,%r16	/* restore dt address */

	li	%r5,0		/* r5 will be 0 for kernel */

	mfmsr	%r11
	andi.	%r10,%r11,1	/* test MSR_LE */
	bne	.Little_endian

	mtctr	%r4		/* prepare branch to */
	bctr			/* start kernel */

.Little_endian:
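	/*
	 * The endian bit cannot simply be flipped in place, so go via an
	 * interrupt return: SRR0 holds the kernel entry point, SRR1 the
	 * MSR with LE cleared, and rfid applies both atomically.
	 */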
	mtsrr0	%r4		/* prepare branch to */

	clrrdi	%r11,%r11,1	/* clear MSR_LE */
	mtsrr1	%r11

	rfid			/* update MSR and start kernel */

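/*
 * The quadwords below are filled in by the kexec loader (via these
 * global symbols) before control is handed to purgatory.
 */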
	.balign 8
	.globl kernel
kernel:
	.8byte  0x0
	.size kernel, . - kernel

	.balign 8
	.globl dt_offset
dt_offset:
	.8byte  0x0
	.size dt_offset, . - dt_offset

	.balign 8
	.globl backup_start
backup_start:
	.8byte  0x0
	.size backup_start, . - backup_start

	.balign 8
	.globl opal_base
opal_base:
	.8byte  0x0
	.size opal_base, . - opal_base

	.balign 8
	.globl opal_entry
opal_entry:
	.8byte  0x0
	.size opal_entry, . - opal_entry

	.data
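/*
 * Space reserved for kexec_file_load(): the loader records the expected
 * sha256 digest of the loaded segments, and the list of regions it
 * covers, here.
 */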
	.balign 8
.globl purgatory_sha256_digest
purgatory_sha256_digest:
	.skip	32
	.size purgatory_sha256_digest, . - purgatory_sha256_digest

	.balign 8
.globl purgatory_sha_regions
purgatory_sha_regions:
	.skip	8 * 2 * 16
	.size purgatory_sha_regions, . - purgatory_sha_regions