/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 *
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */

#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>
#include <asm/pgtable.h>

#include <linux/linkage.h>
#include <linux/init.h>

	.level	LEVEL

	__INITDATA
ENTRY(boot_args)
	.word 0 /* arg0 */
	.word 0 /* arg1 */
	.word 0 /* arg2 */
	.word 0 /* arg3 */
END(boot_args)

	__HEAD
	.align	4
	.import init_thread_union,data
	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!CONFIG_64BIT*/
	.export _stext,data		/* Kernel wants it this way! */
_stext:
ENTRY(stext)
	.proc
	.callinfo

	/* Make sure sr4-sr7 are set to zero for the kernel address space */
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

	/* Clear BSS (shouldn't the boot loader do this?) */

	.import __bss_start,data
	.import __bss_stop,data

	load32		PA(__bss_start),%r3
	load32		PA(__bss_stop),%r4
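	/* Loop until %r3 reaches __bss_stop: the delay-slot store zeroes a
	 * word at %r3 and post-increments it by 4 (",ma"), and the ",n"
	 * completer nullifies that store once the loop condition fails. */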
$bss_loop:
	cmpb,<<,n       %r3,%r4,$bss_loop
	stw,ma          %r0,4(%r3)

	/* Save away the arguments the boot loader passed in (32 bit args) */
	load32		PA(boot_args),%r1
	stw,ma          %arg0,4(%r1)
	stw,ma          %arg1,4(%r1)
	stw,ma          %arg2,4(%r1)
	stw,ma          %arg3,4(%r1)

	/* Initialize startup VM. Just map first 8/16 MB of memory */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#if PT_NLEVELS == 3
	/* Set pmd in pgd */
	load32		PA(pmd0),%r5
	shrd            %r5,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
	/* 2-level page table, so pmd == pgd */
	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif

	/* Fill in pmd with enough pte directories */
	load32		PA(pg0),%r1
	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

	ldi		ASM_PT_INITIAL,%r1

1:
	stw		%r3,0(%r4)
	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
	addib,>		-1,%r1,1b
#if PT_NLEVELS == 3
	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif


	/* Now initialize the PTEs themselves */
	ldo		0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(pg0),%r1

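	/* Each pass stores one PTE and advances the PFN by one, so pg0 ends
	 * up mapping the first (1 << KERNEL_INITIAL_ORDER) bytes of physical
	 * memory with _PAGE_KERNEL protection. */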
$pgt_fill_loop:
	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
	addib,>		-1,%r11,$pgt_fill_loop
	nop

	/* Load the return address...er...crash 'n burn */
	copy		%r0,%r2

	/* And the RFI Target address too */
	load32		start_kernel,%r11

	/* And the initial task pointer */
	load32		init_thread_union,%r6
	mtctl           %r6,%cr30
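	/* %cr30 holds the kernel's current thread_info pointer; the SMP
	 * slave path below initializes it the same way. */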

	/* And the stack pointer too */
	ldo             THREAD_SZ_ALGN(%r6),%sp

#ifdef CONFIG_SMP
	/* Set the smp rendezvous address into page zero.
	** It would be safer to do this in init_smp_config() but
	** it's just way easier to deal with here because
	** of 64-bit function ptrs and the address is local to this file.
	*/
	load32		PA(smp_slave_stext),%r10
	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */

	/* FALLTHROUGH */
	.procend

	/*
	** Code Common to both Monarch and Slave processors.
	** Entry:
	**
	**  1.1:
	**    %r11 must contain RFI target address.
	**    %r25/%r26 args to pass to target function
	**    %r2  in case rfi target decides it didn't like something
	**
	**  2.0w:
	**    %r3  PDCE_PROC address
	**    %r11 RFI target address
	**
	** Caller must init: SR4-7, %sp, %r10, %cr24/25,
	*/
common_stext:
	.proc
	.callinfo
#else
	/* Clear PDC entry point - we won't use it */
	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/

#ifdef CONFIG_64BIT
	tophys_r1	%sp

	/* Save the rfi target address */
	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
	tophys_r1       %r10
	std             %r11,  TASK_PT_GR11(%r10)
	/* Switch to wide mode: Superdome doesn't support narrow PDC
	** calls.
	*/
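	/* The mfia/ldo/depdi sequence below computes a narrow (32-bit)
	 * address for label 2:, and the ssm in the bv delay slot sets the
	 * PSW W bit so the code there runs with 64-bit addressing. */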
1:	mfia            %rp             /* clear upper part of pcoq */
	ldo             2f-1b(%rp),%rp
	depdi           0,31,32,%rp
	bv              (%rp)
	ssm             PSW_SM_W,%r0

        /* Set Wide mode as the "Default" (e.g. for traps)
        ** First trap occurs *right* after (or as part of) rfi for slave CPUs.
        ** Someday, palo might not do this for the Monarch either.
        */
2:
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
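	/* Page Zero offsets of the low and high words of the PDCE_PROC
	 * entry point; the depd below merges them into a single wide
	 * address in %r3 (see the common_stext entry notes above). */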
	ldw             MEM_PDC_LO(%r0),%r3
	ldw             MEM_PDC_HI(%r0),%r6
	depd            %r6, 31, 32, %r3        /* move to upper word */

	ldo             PDC_PSW(%r0),%arg0              /* 21 */
	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
	load32          PA(stext_pdc_ret), %rp
	bv              (%r3)
	copy            %r0,%arg3

stext_pdc_ret:
	/* Restore the rfi target address */
	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
	tophys_r1       %r10
	ldd             TASK_PT_GR11(%r10), %r11
	tovirt_r1       %sp
#endif

	/* PARANOID: clear user scratch/user space SR's */
	mtsp	%r0,%sr0
	mtsp	%r0,%sr1
	mtsp	%r0,%sr2
	mtsp	%r0,%sr3

	/* Initialize Protection Registers */
	mtctl	%r0,%cr8
	mtctl	%r0,%cr9
	mtctl	%r0,%cr12
	mtctl	%r0,%cr13

	/* Initialize the global data pointer */
	loadgp

	/* Set up our interrupt table.  HPMCs might not work after this!
	 *
	 * We need to install the correct iva for PA1.1 or PA2.0. The
	 * following short sequence of instructions can determine this
	 * (without being illegal on a PA1.1 machine).
	 */
#ifndef CONFIG_64BIT
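	/* %cr11 (SAR) holds 5 bits on PA1.1 but 6 bits on PA2.0, so a 32
	 * written to it reads back as non-zero only on PA2.0, which is
	 * what the comib below tests. */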
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$is_pa20
	ldil		L%PA(fault_vector_11),%r10
	b		$install_iva
	ldo		R%PA(fault_vector_11)(%r10),%r10

$is_pa20:
	.level		LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
	load32		PA(fault_vector_20),%r10

$install_iva:
	mtctl		%r10,%cr14

	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
	nop

	.align 128
aligned_rfi:
	pcxt_ssm_bug

	rsm		PSW_SM_QUIET,%r0	/* turn off troublesome PSW bits */
	/* Don't need NOPs, we have 8 compliant insns before rfi */

	mtctl		%r0,%cr17	/* Clear IIASQ tail */
	mtctl		%r0,%cr17	/* Clear IIASQ head */

	/* Load RFI target into PC queue */
	mtctl		%r11,%cr18	/* IIAOQ head */
	ldo		4(%r11),%r11
	mtctl		%r11,%cr18	/* IIAOQ tail */

	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw
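	/* rfi reloads the PSW from %ipsw and the PC queues from the IIA
	 * queues set up above, so execution resumes at the RFI target in
	 * virtual mode with KERNEL_PSW in effect. */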

	/* Jump through hyperspace to Virt Mode */
	rfi
	nop

	.procend

#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef CONFIG_64BIT
smp_callin_rtn:
        .proc
	.callinfo
	break	1,1		/*  Break if returned from start_secondary */
	nop
	nop
        .procend
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, register values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
        .proc
	.callinfo

	/*
	** Initialize Space registers
	*/
	mtsp	   %r0,%sr4
	mtsp	   %r0,%sr5
	mtsp	   %r0,%sr6
	mtsp	   %r0,%sr7

	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
	load32		PA(smp_init_current_idle_task),%sp
	LDREG		0(%sp),%sp	/* load task address */
	tophys_r1	%sp
	LDREG		TASK_THREAD_INFO(%sp),%sp
	mtctl           %sp,%cr30       /* store in cr30 */
	ldo             THREAD_SZ_ALGN(%sp),%sp

	/* point CPU to kernel page tables */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry */
	copy            %arg0,%r3
#else
	/* Load RFI *return* address in case smp_callin bails */
	load32		smp_callin_rtn,%r2
#endif

	/* Load RFI target address.  */
	load32		smp_callin,%r11

	/* ok...common code can handle the rest */
	b		common_stext
	nop

	.procend
#endif /* CONFIG_SMP */

ENDPROC(stext)

#ifndef CONFIG_64BIT
	.section .data.read_mostly

	.align	4
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/