xref: /openbmc/linux/arch/parisc/kernel/head.S (revision 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2)
1/* This file is subject to the terms and conditions of the GNU General Public
2 * License.  See the file "COPYING" in the main directory of this archive
3 * for more details.
4 *
5 * Copyright (C) 1999 by Helge Deller
6 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
7 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
8 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
9 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
10 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
11 *
12 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
13 */
14
15#include <linux/autoconf.h>	/* for CONFIG_SMP */
16
17#include <asm/offsets.h>
18#include <asm/psw.h>
19#include <asm/pdc.h>
20
21#include <asm/assembly.h>
22#include <asm/pgtable.h>
23
24	.level	LEVEL
25
26	.data
27
28	.export boot_args
	/* Save area for the four 32-bit values the boot loader hands us
	 * in %arg0-%arg3; filled in near the top of stext and exported
	 * so C code can examine them later. */
29boot_args:
30	.word 0 /* arg0 */
31	.word 0 /* arg1 */
32	.word 0 /* arg2 */
33	.word 0 /* arg3 */
34
35	.text
36	.align	4
37	.import init_thread_union,data
38	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
39#ifndef __LP64__
40        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
41	.import	$global$		/* forward declaration */
42#endif /*!LP64*/
43	.export stext
44	.export _stext,data		/* Kernel wants it this way! */
	/*
	 * Kernel entry point.  The boot loader enters here in physical
	 * (real) mode with its four arguments in %arg0-%arg3.  We clear
	 * BSS, build a minimal boot page table, install the interruption
	 * vector, and finally rfi to the virtual-mode target held in
	 * %r11 (start_kernel for the monarch CPU; smp_callin for slaves
	 * arriving via smp_slave_stext -> common_stext).
	 */
45_stext:
46stext:
47	.proc
48	.callinfo

50	/* Make sure sr4-sr7 are set to zero for the kernel address space */
51	mtsp	%r0,%sr4
52	mtsp	%r0,%sr5
53	mtsp	%r0,%sr6
54	mtsp	%r0,%sr7

56	/* Clear BSS (shouldn't the boot loader do this?) */

58	.import __bss_start,data
59	.import __bss_stop,data

61	load32		PA(__bss_start),%r3
62	load32		PA(__bss_stop),%r4
	/* Word-at-a-time clear: ,ma post-increments %r3 by 4 with each
	 * store, and ,n nullifies the delay-slot store when the backward
	 * branch is not taken (i.e. once %r3 reaches __bss_stop). */
63$bss_loop:
64	cmpb,<<,n       %r3,%r4,$bss_loop
65	stw,ma          %r0,4(%r3)

67	/* Save away the arguments the boot loader passed in (32 bit args) */
68	load32		PA(boot_args),%r1
69	stw,ma          %arg0,4(%r1)
70	stw,ma          %arg1,4(%r1)
71	stw,ma          %arg2,4(%r1)
72	stw,ma          %arg3,4(%r1)

74	/* Initialize startup VM. Just map first 8/16 MB of memory */
75	load32		PA(swapper_pg_dir),%r4
76	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
77	mtctl		%r4,%cr25	/* Initialize user root pointer */

79#ifdef __LP64__
80	/* Set pmd in pgd */
81	load32		PA(pmd0),%r5
82	shrd            %r5,PxD_VALUE_SHIFT,%r3
83        ldo             (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
84	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
85	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
86#else
87	/* 2-level page table, so pmd == pgd */
88        ldo             ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
89#endif

91	/* Fill in pmd with enough pte directories */
92	load32		PA(pg0),%r1
93	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
94	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

96	ldi		ASM_PT_INITIAL,%r1

	/* ASM_PT_INITIAL entries; the delay-slot ldo advances the
	 * pmd/pgd entry pointer (%r4) on every iteration, including
	 * the final fall-through one. */
981:
99	stw		%r3,0(%r4)
100	ldo		(ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
101	addib,>		-1,%r1,1b
102#ifdef __LP64__
103	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
104#else
105	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
106#endif


109	/* Now initialize the PTEs themselves */
110	ldo		_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
111	load32		PA(pg0),%r1

	/* bb,>= loops while bit (31 - KERNEL_INITIAL_ORDER) of the
	 * mapped address in %r3 is still clear, i.e. until the initial
	 * kernel mapping is fully populated. */
113$pgt_fill_loop:
114	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
115	ldo		ASM_PAGE_SIZE(%r3),%r3
116	bb,>=		%r3,31-KERNEL_INITIAL_ORDER,$pgt_fill_loop
117	nop

119	/* Load the return address...er...crash 'n burn */
120	copy		%r0,%r2

122	/* And the RFI Target address too */
123	load32		start_kernel,%r11

125	/* And the initial task pointer */
126	load32		init_thread_union,%r6
127	mtctl           %r6,%cr30

129	/* And the stack pointer too */
130	ldo             THREAD_SZ_ALGN(%r6),%sp

132	/* And the interrupt stack */
133	load32		interrupt_stack,%r6
134	mtctl           %r6,%cr31

136#ifdef CONFIG_SMP
137	/* Set the smp rendezvous address into page zero.
138	** It would be safer to do this in init_smp_config() but
139	** it's just way easier to deal with here because
140	** of 64-bit function ptrs and the address is local to this file.
141	*/
142	load32		PA(smp_slave_stext),%r10
143	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
144	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */

146	/* FALLTHROUGH */
147	.procend

149	/*
150	** Code Common to both Monarch and Slave processors.
151	** Entry:
152	**
153	**  1.1:
154	**    %r11 must contain RFI target address.
155	**    %r25/%r26 args to pass to target function
156	**    %r2  in case rfi target decides it didn't like something
157	**
158	**  2.0w:
159	**    %r3  PDCE_PROC address
160	**    %r11 RFI target address
161	**
162	** Caller must init: SR4-7, %sp, %r10, %cr24/25,
163	*/
164common_stext:
165	.proc
166	.callinfo
167#else
168	/* Clear PDC entry point - we won't use it */
169	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
170	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
171#endif /*CONFIG_SMP*/

173#ifdef __LP64__
174	tophys_r1	%sp

	/* Save the rfi target address in the task's pt_regs gr11 slot;
	 * the upcoming firmware call is free to clobber %r11. */
177	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
178	tophys_r1       %r10
179	std             %r11,  TASK_PT_GR11(%r10)
180	/* Switch to wide mode: Superdome doesn't support narrow PDC
181	** calls.
182	*/
1831:	mfia            %rp             /* clear upper part of pcoq */
184	ldo             2f-1b(%rp),%rp
185	depdi           0,31,32,%rp
186	bv              (%rp)
187	ssm             PSW_SM_W,%r0

189        /* Set Wide mode as the "Default" (eg for traps)
190        ** First trap occurs *right* after (or part of) rfi for slave CPUs.
191        ** Someday, palo might not do this for the Monarch either.
192        */
1932:
	/* Page-zero words holding the PDC entry point; combined below
	 * with depd into a single 64-bit address in %r3. */
194#define MEM_PDC_LO 0x388
195#define MEM_PDC_HI 0x35C
196	ldw             MEM_PDC_LO(%r0),%r3
197	ldw             MEM_PDC_HI(%r0),%r6
198	depd            %r6, 31, 32, %r3        /* move to upper word */

	/* PDC_PSW / PDC_PSW_SET_DEFAULTS with the WIDE bit: ask firmware
	 * to make wide (64-bit) PSW the default, then return to
	 * stext_pdc_ret. */
200	ldo             PDC_PSW(%r0),%arg0              /* 21 */
201	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
202	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
203	load32          PA(stext_pdc_ret), %rp
204	bv              (%r3)
205	copy            %r0,%arg3

207stext_pdc_ret:
208	/* restore rfi target address */
209	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
210	tophys_r1       %r10
211	ldd             TASK_PT_GR11(%r10), %r11
212	tovirt_r1       %sp
213#endif

215	/* PARANOID: clear user scratch/user space SR's */
216	mtsp	%r0,%sr0
217	mtsp	%r0,%sr1
218	mtsp	%r0,%sr2
219	mtsp	%r0,%sr3

221	/* Initialize Protection Registers */
222	mtctl	%r0,%cr8
223	mtctl	%r0,%cr9
224	mtctl	%r0,%cr12
225	mtctl	%r0,%cr13

227	/* Prepare to RFI! Man all the cannons! */

229	/* Initialize the global data pointer */
230	loadgp

232	/* Set up our interrupt table.  HPMCs might not work after this!
233	 *
234	 * We need to install the correct iva for PA1.1 or PA2.0. The
235	 * following short sequence of instructions can determine this
236	 * (without being illegal on a PA1.1 machine).
237	 */
238#ifndef __LP64__
	/* Write 32 to %cr11 (SAR) and read it back with the PA2.0 form
	 * of mfctl: PA2.0's 6-bit SAR keeps the 32, while PA1.1's 5-bit
	 * SAR truncates it to 0 — so nonzero means PA2.0. */
239	ldi		32,%r10
240	mtctl		%r10,%cr11
241	.level 2.0
242	mfctl,w		%cr11,%r10
243	.level 1.1
244	comib,<>,n	0,%r10,$is_pa20
245	ldil		L%PA(fault_vector_11),%r10
246	b		$install_iva
247	ldo		R%PA(fault_vector_11)(%r10),%r10

249$is_pa20:
250	.level		LEVEL /* restore 1.1 || 2.0w */
251#endif /*!LP64*/
252	load32		PA(fault_vector_20),%r10

254$install_iva:
255	mtctl		%r10,%cr14

257#ifdef __LP64__
	/* Branch to a 256-byte-aligned copy of the rfi sequence padded
	 * with nops — presumably to satisfy an alignment constraint on
	 * this PSW transition; the original gives no rationale. */
258	b		aligned_rfi
259	nop

261	.align          256
262aligned_rfi:
263	ssm             0,0
264	nop             /* 1 */
265	nop             /* 2 */
266	nop             /* 3 */
267	nop             /* 4 */
268	nop             /* 5 */
269	nop             /* 6 */
270	nop             /* 7 */
271	nop             /* 8 */
272#endif

274#ifdef __LP64__ /* move to psw.h? */
275#define		PSW_BITS	PSW_Q+PSW_I+PSW_D+PSW_P+PSW_R
276#else
277#define		PSW_BITS	PSW_SM_Q
278#endif

280$rfi:
281	/* turn off troublesome PSW bits */
282	rsm		PSW_BITS,%r0

284	/* kernel PSW:
285	 *  - no interruptions except HPMC and TOC (which are handled by PDC)
286	 *  - Q bit set (IODC / PDC interruptions)
287	 *  - big-endian
288	 *  - virtually mapped
289	 */
290	load32		KERNEL_PSW,%r10
291	mtctl		%r10,%ipsw

293	/* Set the space pointers for the post-RFI world
294	** Clear the two-level IIA Space Queue, effectively setting
295	** Kernel space.
296	*/
297	mtctl		%r0,%cr17	/* Clear IIASQ tail */
298	mtctl		%r0,%cr17	/* Clear IIASQ head */

	/* Load RFI target into the two-element PC offset queue:
	 * head = target, tail = target + 4. */
301	mtctl		%r11,%cr18	/* IIAOQ head */
302	ldo		4(%r11),%r11
303	mtctl		%r11,%cr18	/* IIAOQ tail */

305	/* Jump to hyperspace */
306	rfi
307	nop

309	.procend
310
311#ifdef CONFIG_SMP
312
313	.import smp_init_current_idle_task,data
314	.import	smp_callin,code
315
316#ifndef __LP64__
	/* RFI *return* address installed in %r2 for slave CPUs on 32-bit
	 * kernels (see smp_slave_stext).  smp_callin must never return;
	 * trap with a break instruction if it ever does. */
317smp_callin_rtn:
318        .proc
319	.callinfo
320	break	1,1		/*  Break if returned from start_secondary */
321	nop
322	nop
323        .procend
324#endif /*!LP64*/
325
326/***************************************************************************
327* smp_slave_stext is executed by all non-monarch Processors when the Monarch
328* pokes the slave CPUs in smp.c:smp_boot_cpus().
329*
330* Once here, register values are initialized in order to branch to virtual
331* mode. Once all available/eligible CPUs are in virtual mode, all are
332* released and start out by executing their own idle task.
333*****************************************************************************/
334smp_slave_stext:
335        .proc
336	.callinfo

338	/*
339	** Initialize Space registers
340	*/
341	mtsp	   %r0,%sr4
342	mtsp	   %r0,%sr5
343	mtsp	   %r0,%sr6
344	mtsp	   %r0,%sr7

346	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
	/* Chase smp_init_current_idle_task -> task -> thread_info, stash
	 * the thread_info in %cr30, then put %sp at the top of the
	 * (THREAD_SZ_ALGN) stack area. */
347	load32		PA(smp_init_current_idle_task),%sp
348	LDREG		0(%sp),%sp	/* load task address */
349	tophys_r1	%sp
350	LDREG		TASK_THREAD_INFO(%sp),%sp
351	mtctl           %sp,%cr30       /* store in cr30 */
352	ldo             THREAD_SZ_ALGN(%sp),%sp

354	/* point CPU to kernel page tables */
355	load32		PA(swapper_pg_dir),%r4
356	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
357	mtctl		%r4,%cr25	/* Initialize user root pointer */

359#ifdef __LP64__
360	/* Setup PDCE_PROC entry */
361	copy            %arg0,%r3
362#else
363	/* Load RFI *return* address in case smp_callin bails */
364	load32		smp_callin_rtn,%r2
365#endif

367	/* Load RFI target address.  */
368	load32		smp_callin,%r11

370	/* ok...common code can handle the rest */
371	b		common_stext
372	nop

374	.procend
375#endif /* CONFIG_SMP */
376#ifndef __LP64__
377	.data

379	.align	4
380	.export	$global$,data

382	.type	$global$,@object
383	.size	$global$,4
	/* $global$: the conventional PA-RISC 32-bit global-data-pointer
	 * anchor (declared .import earlier and set up via loadgp); a
	 * single zero-initialized word. */
384$global$:
385	.word 0
386#endif /*!LP64*/
387