xref: /openbmc/linux/arch/openrisc/kernel/head.S (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * OpenRISC head.S
4  *
5  * Linux architectural port borrowing liberally from similar works of
6  * others.  All original copyrights apply as per the original source
7  * declaration.
8  *
9  * Modifications for the OpenRISC architecture:
10  * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
11  * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
12  */
13 
14 #include <linux/linkage.h>
15 #include <linux/threads.h>
16 #include <linux/errno.h>
17 #include <linux/init.h>
18 #include <linux/serial_reg.h>
19 #include <linux/pgtable.h>
20 #include <asm/processor.h>
21 #include <asm/page.h>
22 #include <asm/mmu.h>
23 #include <asm/thread_info.h>
24 #include <asm/cache.h>
25 #include <asm/spr_defs.h>
26 #include <asm/asm-offsets.h>
27 #include <linux/of_fdt.h>
28 
29 #define tophys(rd,rs)				\
30 	l.movhi	rd,hi(-KERNELBASE)		;\
31 	l.add	rd,rd,rs
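/*
 * tophys(rd, rs): virtual-to-physical translation, roughly rd = rs - KERNELBASE.
 * A single l.movhi is enough because the low 16 bits of -KERNELBASE are zero
 * for the usual 0xc0000000 kernel base (a descriptive note, not new code).
 */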
32 
33 #define CLEAR_GPR(gpr)				\
34 	l.movhi	gpr,0x0
35 
36 #define LOAD_SYMBOL_2_GPR(gpr,symbol)		\
37 	l.movhi gpr,hi(symbol)			;\
38 	l.ori   gpr,gpr,lo(symbol)
39 
40 
41 #define UART_BASE_ADD      0x90000000
42 
43 #define EXCEPTION_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
44 #define SYSCALL_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
45 
46 /* ============================================[ tmp store locations ]=== */
47 
48 #define SPR_SHADOW_GPR(x)	((x) + SPR_GPR_BASE + 32)
49 
50 /*
51  * emergency_print temporary stores
52  */
53 #ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
54 #define EMERGENCY_PRINT_STORE_GPR4	l.mtspr r0,r4,SPR_SHADOW_GPR(14)
55 #define EMERGENCY_PRINT_LOAD_GPR4	l.mfspr r4,r0,SPR_SHADOW_GPR(14)
56 
57 #define EMERGENCY_PRINT_STORE_GPR5	l.mtspr r0,r5,SPR_SHADOW_GPR(15)
58 #define EMERGENCY_PRINT_LOAD_GPR5	l.mfspr r5,r0,SPR_SHADOW_GPR(15)
59 
60 #define EMERGENCY_PRINT_STORE_GPR6	l.mtspr r0,r6,SPR_SHADOW_GPR(16)
61 #define EMERGENCY_PRINT_LOAD_GPR6	l.mfspr r6,r0,SPR_SHADOW_GPR(16)
62 
63 #define EMERGENCY_PRINT_STORE_GPR7	l.mtspr r0,r7,SPR_SHADOW_GPR(7)
64 #define EMERGENCY_PRINT_LOAD_GPR7	l.mfspr r7,r0,SPR_SHADOW_GPR(7)
65 
66 #define EMERGENCY_PRINT_STORE_GPR8	l.mtspr r0,r8,SPR_SHADOW_GPR(8)
67 #define EMERGENCY_PRINT_LOAD_GPR8	l.mfspr r8,r0,SPR_SHADOW_GPR(8)
68 
69 #define EMERGENCY_PRINT_STORE_GPR9	l.mtspr r0,r9,SPR_SHADOW_GPR(9)
70 #define EMERGENCY_PRINT_LOAD_GPR9	l.mfspr r9,r0,SPR_SHADOW_GPR(9)
71 
72 #else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
73 #define EMERGENCY_PRINT_STORE_GPR4	l.sw    0x20(r0),r4
74 #define EMERGENCY_PRINT_LOAD_GPR4	l.lwz   r4,0x20(r0)
75 
76 #define EMERGENCY_PRINT_STORE_GPR5	l.sw    0x24(r0),r5
77 #define EMERGENCY_PRINT_LOAD_GPR5	l.lwz   r5,0x24(r0)
78 
79 #define EMERGENCY_PRINT_STORE_GPR6	l.sw    0x28(r0),r6
80 #define EMERGENCY_PRINT_LOAD_GPR6	l.lwz   r6,0x28(r0)
81 
82 #define EMERGENCY_PRINT_STORE_GPR7	l.sw    0x2c(r0),r7
83 #define EMERGENCY_PRINT_LOAD_GPR7	l.lwz   r7,0x2c(r0)
84 
85 #define EMERGENCY_PRINT_STORE_GPR8	l.sw    0x30(r0),r8
86 #define EMERGENCY_PRINT_LOAD_GPR8	l.lwz   r8,0x30(r0)
87 
88 #define EMERGENCY_PRINT_STORE_GPR9	l.sw    0x34(r0),r9
89 #define EMERGENCY_PRINT_LOAD_GPR9	l.lwz   r9,0x34(r0)
90 
91 #endif
92 
93 /*
94  * TLB miss handlers temporary stores
95  */
96 #ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
97 #define EXCEPTION_STORE_GPR2		l.mtspr r0,r2,SPR_SHADOW_GPR(2)
98 #define EXCEPTION_LOAD_GPR2		l.mfspr r2,r0,SPR_SHADOW_GPR(2)
99 
100 #define EXCEPTION_STORE_GPR3		l.mtspr r0,r3,SPR_SHADOW_GPR(3)
101 #define EXCEPTION_LOAD_GPR3		l.mfspr r3,r0,SPR_SHADOW_GPR(3)
102 
103 #define EXCEPTION_STORE_GPR4		l.mtspr r0,r4,SPR_SHADOW_GPR(4)
104 #define EXCEPTION_LOAD_GPR4		l.mfspr r4,r0,SPR_SHADOW_GPR(4)
105 
106 #define EXCEPTION_STORE_GPR5		l.mtspr r0,r5,SPR_SHADOW_GPR(5)
107 #define EXCEPTION_LOAD_GPR5		l.mfspr r5,r0,SPR_SHADOW_GPR(5)
108 
109 #define EXCEPTION_STORE_GPR6		l.mtspr r0,r6,SPR_SHADOW_GPR(6)
110 #define EXCEPTION_LOAD_GPR6		l.mfspr r6,r0,SPR_SHADOW_GPR(6)
111 
112 #else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
113 #define EXCEPTION_STORE_GPR2		l.sw    0x64(r0),r2
114 #define EXCEPTION_LOAD_GPR2		l.lwz   r2,0x64(r0)
115 
116 #define EXCEPTION_STORE_GPR3		l.sw    0x68(r0),r3
117 #define EXCEPTION_LOAD_GPR3		l.lwz   r3,0x68(r0)
118 
119 #define EXCEPTION_STORE_GPR4		l.sw    0x6c(r0),r4
120 #define EXCEPTION_LOAD_GPR4		l.lwz   r4,0x6c(r0)
121 
122 #define EXCEPTION_STORE_GPR5		l.sw    0x70(r0),r5
123 #define EXCEPTION_LOAD_GPR5		l.lwz   r5,0x70(r0)
124 
125 #define EXCEPTION_STORE_GPR6		l.sw    0x74(r0),r6
126 #define EXCEPTION_LOAD_GPR6		l.lwz   r6,0x74(r0)
127 
128 #endif
129 
130 /*
131  * EXCEPTION_HANDLE temporary stores
132  */
133 
134 #ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
135 #define EXCEPTION_T_STORE_GPR30		l.mtspr r0,r30,SPR_SHADOW_GPR(30)
136 #define EXCEPTION_T_LOAD_GPR30(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(30)
137 
138 #define EXCEPTION_T_STORE_GPR10		l.mtspr r0,r10,SPR_SHADOW_GPR(10)
139 #define EXCEPTION_T_LOAD_GPR10(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(10)
140 
141 #define EXCEPTION_T_STORE_SP		l.mtspr r0,r1,SPR_SHADOW_GPR(1)
142 #define EXCEPTION_T_LOAD_SP(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(1)
143 
144 #else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
145 #define EXCEPTION_T_STORE_GPR30		l.sw    0x78(r0),r30
146 #define EXCEPTION_T_LOAD_GPR30(reg)	l.lwz   reg,0x78(r0)
147 
148 #define EXCEPTION_T_STORE_GPR10		l.sw    0x7c(r0),r10
149 #define EXCEPTION_T_LOAD_GPR10(reg)	l.lwz   reg,0x7c(r0)
150 
151 #define EXCEPTION_T_STORE_SP		l.sw    0x80(r0),r1
152 #define EXCEPTION_T_LOAD_SP(reg)	l.lwz   reg,0x80(r0)
153 #endif
154 
155 /* =========================================================[ macros ]=== */
156 
157 #ifdef CONFIG_SMP
158 #define GET_CURRENT_PGD(reg,t1)					\
159 	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
160 	l.mfspr	t1,r0,SPR_COREID				;\
161 	l.slli	t1,t1,2						;\
162 	l.add	reg,reg,t1					;\
163 	tophys  (t1,reg)					;\
164 	l.lwz   reg,0(t1)
165 #else
166 #define GET_CURRENT_PGD(reg,t1)					\
167 	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
168 	tophys  (t1,reg)					;\
169 	l.lwz   reg,0(t1)
170 #endif
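/*
 * GET_CURRENT_PGD in rough C (an illustrative sketch; current_pgd is an
 * array indexed by core id in the SMP case, entry 0 otherwise):
 *
 *   reg = *(pgd_t **)__pa(&current_pgd[smp ? coreid : 0]);
 */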
171 
172 /* Load r10 from current_thread_info_set - clobbers r1 and r30 */
173 #ifdef CONFIG_SMP
174 #define GET_CURRENT_THREAD_INFO					\
175 	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
176 	tophys  (r30,r1)					;\
177 	l.mfspr	r10,r0,SPR_COREID				;\
178 	l.slli	r10,r10,2					;\
179 	l.add	r30,r30,r10					;\
180 	/* r10: current_thread_info  */				;\
181 	l.lwz   r10,0(r30)
182 #else
183 #define GET_CURRENT_THREAD_INFO					\
184 	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
185 	tophys  (r30,r1)					;\
186 	/* r10: current_thread_info  */				;\
187 	l.lwz   r10,0(r30)
188 #endif
189 
190 /*
191  * DSCR: this is a common hook for handling exceptions. it will save
192  *       the needed registers, set up stack and pointer to current
193  *	 then jump to the handler while enabling MMU
194  *
195  * PRMS: handler	- a function to jump to. it has to save the
196  *			remaining registers to kernel stack, call
197  *			appropriate arch-independent exception handler
198  *			and finally jump to ret_from_except
199  *
200  * PREQ: unchanged state from the time exception happened
201  *
202  * POST: SAVED the following registers' original values
203  *	       to the newly created exception frame pointed to by r1
204  *
205  *	 r1  - ksp	pointing to the new (exception) frame
206  *	 r4  - EEAR     exception EA
207  *	 r10 - current	pointing to current_thread_info struct
208  *	 r12 - syscall  0, since we didn't come from syscall
209  *	 r30 - handler	address of the handler we'll jump to
210  *
211  *	 handler has to save remaining registers to the exception
212  *	 ksp frame *before* tainting them!
213  *
214  * NOTE: this function is not reentrant per se. reentrancy is guaranteed
215  *       by the processor disabling all exceptions/interrupts when an
216  *	 exception occurs.
217  *
218  * OPTM: no need to make it so wasteful to extract ksp when in user mode
219  */
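/*
 * The "turn on MMU" tail of the macro below is, in rough C (an illustrative
 * sketch, with mtspr()/mfspr() standing in for l.mtspr/l.mfspr):
 *
 *   mtspr(SPR_ESR_BASE, (mfspr(SPR_SR) & SPR_SR_DSX) | EXCEPTION_SR);
 *   mtspr(SPR_EPCR_BASE, handler);
 *   rfe();   // SR <- ESR, PC <- EPC: enters handler with the MMU enabled
 */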
220 
221 #define EXCEPTION_HANDLE(handler)				\
222 	EXCEPTION_T_STORE_GPR30					;\
223 	l.mfspr r30,r0,SPR_ESR_BASE				;\
224 	l.andi  r30,r30,SPR_SR_SM				;\
225 	l.sfeqi r30,0						;\
226 	EXCEPTION_T_STORE_GPR10					;\
227 	l.bnf   2f                            /* kernel_mode */	;\
228 	 EXCEPTION_T_STORE_SP                 /* delay slot */	;\
229 1: /* user_mode:   */						;\
230 	GET_CURRENT_THREAD_INFO	 				;\
231 	tophys  (r30,r10)					;\
232 	l.lwz   r1,(TI_KSP)(r30)				;\
233 	/* fall through */					;\
234 2: /* kernel_mode: */						;\
235 	/* create new stack frame, save only needed gprs */	;\
236 	/* r1: KSP, r10: current, r4: EEAR, r30: __pa(KSP) */	;\
237 	/* r12:	temp, syscall indicator */			;\
238 	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
239 	/* r1 is KSP, r30 is __pa(KSP) */			;\
240 	tophys  (r30,r1)					;\
241 	l.sw    PT_GPR12(r30),r12				;\
242 	/* r4 use for tmp before EA */				;\
243 	l.mfspr r12,r0,SPR_EPCR_BASE				;\
244 	l.sw    PT_PC(r30),r12					;\
245 	l.mfspr r12,r0,SPR_ESR_BASE				;\
246 	l.sw    PT_SR(r30),r12					;\
247 	/* save r30 */						;\
248 	EXCEPTION_T_LOAD_GPR30(r12)				;\
249 	l.sw	PT_GPR30(r30),r12				;\
250 	/* save r10 as was prior to exception */		;\
251 	EXCEPTION_T_LOAD_GPR10(r12)				;\
252 	l.sw	PT_GPR10(r30),r12				;\
253 	/* save PT_SP as was prior to exception */		;\
254 	EXCEPTION_T_LOAD_SP(r12)				;\
255 	l.sw	PT_SP(r30),r12					;\
256 	/* save exception r4, set r4 = EA */			;\
257 	l.sw	PT_GPR4(r30),r4					;\
258 	l.mfspr r4,r0,SPR_EEAR_BASE				;\
259 	/* r12 == 1 if we come from syscall */			;\
260 	CLEAR_GPR(r12)						;\
261 	/* ----- turn on MMU ----- */				;\
262 	/* Carry DSX into exception SR */			;\
263 	l.mfspr r30,r0,SPR_SR					;\
264 	l.andi	r30,r30,SPR_SR_DSX				;\
265 	l.ori	r30,r30,(EXCEPTION_SR)				;\
266 	l.mtspr	r0,r30,SPR_ESR_BASE				;\
267 	/* r30:	EA address of handler */			;\
268 	LOAD_SYMBOL_2_GPR(r30,handler)				;\
269 	l.mtspr r0,r30,SPR_EPCR_BASE				;\
270 	l.rfe
271 
272 /*
273  * this doesn't work
274  *
275  *
276  * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
277  * #define UNHANDLED_EXCEPTION(handler)				\
278  *	l.ori   r3,r0,0x1					;\
279  *	l.mtspr r0,r3,SPR_SR					;\
280  *      l.movhi r3,hi(0xf0000100)				;\
281  *      l.ori   r3,r3,lo(0xf0000100)				;\
282  *	l.jr	r3						;\
283  *	l.nop	1
284  *
285  * #endif
286  */
287 
288 /* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
289  *       a bit more careful (in case we have PT_SP or current pointer
290  *       corruption) and set them up from 'current_set'
291  *
292  */
293 #define UNHANDLED_EXCEPTION(handler)				\
294 	EXCEPTION_T_STORE_GPR30					;\
295 	EXCEPTION_T_STORE_GPR10					;\
296 	EXCEPTION_T_STORE_SP					;\
297 	/* temporary store r3, r9 into r1, r10 */		;\
298 	l.addi	r1,r3,0x0					;\
299 	l.addi	r10,r9,0x0					;\
300 	LOAD_SYMBOL_2_GPR(r9,_string_unhandled_exception)	;\
301 	tophys	(r3,r9)						;\
302 	l.jal	_emergency_print				;\
303 	 l.nop							;\
304 	l.mfspr	r3,r0,SPR_NPC					;\
305 	l.jal	_emergency_print_nr				;\
306 	 l.andi	r3,r3,0x1f00					;\
307 	LOAD_SYMBOL_2_GPR(r9,_string_epc_prefix)		;\
308 	tophys	(r3,r9)						;\
309 	l.jal	_emergency_print				;\
310 	 l.nop							;\
311 	l.jal	_emergency_print_nr				;\
312 	 l.mfspr r3,r0,SPR_EPCR_BASE				;\
313 	LOAD_SYMBOL_2_GPR(r9,_string_nl)			;\
314 	tophys	(r3,r9)						;\
315 	l.jal	_emergency_print				;\
316 	 l.nop							;\
317 	/* end of printing */					;\
318 	l.addi	r3,r1,0x0					;\
319 	l.addi	r9,r10,0x0					;\
320 	/* extract current, ksp from current_set */		;\
321 	LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top)		;\
322 	LOAD_SYMBOL_2_GPR(r10,init_thread_union)		;\
323 	/* create new stack frame, save only needed gprs */	;\
324 	/* r1: KSP, r10: current, r30: __pa(KSP) */		;\
325 	/* r12:	temp, syscall indicator, r13 temp */		;\
326 	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
327 	/* r1 is KSP, r30 is __pa(KSP) */			;\
328 	tophys  (r30,r1)					;\
329 	l.sw    PT_GPR12(r30),r12					;\
330 	l.mfspr r12,r0,SPR_EPCR_BASE				;\
331 	l.sw    PT_PC(r30),r12					;\
332 	l.mfspr r12,r0,SPR_ESR_BASE				;\
333 	l.sw    PT_SR(r30),r12					;\
334 	/* save r30 */						;\
335 	EXCEPTION_T_LOAD_GPR30(r12)				;\
336 	l.sw	PT_GPR30(r30),r12					;\
337 	/* save r10 as was prior to exception */		;\
338 	EXCEPTION_T_LOAD_GPR10(r12)				;\
339 	l.sw	PT_GPR10(r30),r12					;\
340 	/* save PT_SP as was prior to exception */			;\
341 	EXCEPTION_T_LOAD_SP(r12)				;\
342 	l.sw	PT_SP(r30),r12					;\
343 	l.sw    PT_GPR13(r30),r13					;\
344 	/* --> */						;\
345 	/* save exception r4, set r4 = EA */			;\
346 	l.sw	PT_GPR4(r30),r4					;\
347 	l.mfspr r4,r0,SPR_EEAR_BASE				;\
348 	/* r12 == 1 if we come from syscall */			;\
349 	CLEAR_GPR(r12)						;\
350 	/* ----- play a MMU trick ----- */			;\
351 	l.ori	r30,r0,(EXCEPTION_SR)				;\
352 	l.mtspr	r0,r30,SPR_ESR_BASE				;\
353 	/* r30:	EA address of handler */			;\
354 	LOAD_SYMBOL_2_GPR(r30,handler)				;\
355 	l.mtspr r0,r30,SPR_EPCR_BASE				;\
356 	l.rfe
357 
358 /* =====================================================[ exceptions] === */
359 
360 /* ---[ 0x100: RESET exception ]----------------------------------------- */
361     .org 0x100
362 	/* Jump to .init code at _start which lives in the .head section
363 	 * and will be discarded after boot.
364 	 */
365 	LOAD_SYMBOL_2_GPR(r15, _start)
366 	tophys	(r13,r15)			/* MMU disabled */
367 	l.jr	r13
368 	 l.nop
369 
370 /* ---[ 0x200: BUS exception ]------------------------------------------- */
371     .org 0x200
372 _dispatch_bus_fault:
373 	EXCEPTION_HANDLE(_bus_fault_handler)
374 
375 /* ---[ 0x300: Data Page Fault exception ]------------------------------- */
376     .org 0x300
377 _dispatch_do_dpage_fault:
378 //      totally disable timer interrupt
379 // 	l.mtspr	r0,r0,SPR_TTMR
380 //	DEBUG_TLB_PROBE(0x300)
381 //	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
382 	EXCEPTION_HANDLE(_data_page_fault_handler)
383 
384 /* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
385     .org 0x400
386 _dispatch_do_ipage_fault:
387 //      totally disable timer interrupt
388 //	l.mtspr	r0,r0,SPR_TTMR
389 //	DEBUG_TLB_PROBE(0x400)
390 //	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
391 	EXCEPTION_HANDLE(_insn_page_fault_handler)
392 
393 /* ---[ 0x500: Timer exception ]----------------------------------------- */
394     .org 0x500
395 	EXCEPTION_HANDLE(_timer_handler)
396 
397 /* ---[ 0x600: Alignment exception ]-------------------------------------- */
398     .org 0x600
399 	EXCEPTION_HANDLE(_alignment_handler)
400 
401 /* ---[ 0x700: Illegal insn exception ]---------------------------------- */
402     .org 0x700
403 	EXCEPTION_HANDLE(_illegal_instruction_handler)
404 
405 /* ---[ 0x800: External interrupt exception ]---------------------------- */
406     .org 0x800
407 	EXCEPTION_HANDLE(_external_irq_handler)
408 
409 /* ---[ 0x900: DTLB miss exception ]------------------------------------- */
410     .org 0x900
411 	l.j	boot_dtlb_miss_handler
412 	l.nop
413 
414 /* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
415     .org 0xa00
416 	l.j	boot_itlb_miss_handler
417 	l.nop
418 
419 /* ---[ 0xb00: Range exception ]----------------------------------------- */
420     .org 0xb00
421 	UNHANDLED_EXCEPTION(_vector_0xb00)
422 
423 /* ---[ 0xc00: Syscall exception ]--------------------------------------- */
424     .org 0xc00
425 	EXCEPTION_HANDLE(_sys_call_handler)
426 
427 /* ---[ 0xd00: Floating point exception ]--------------------------------- */
428     .org 0xd00
429 	EXCEPTION_HANDLE(_fpe_trap_handler)
430 
431 /* ---[ 0xe00: Trap exception ]------------------------------------------ */
432     .org 0xe00
433 //	UNHANDLED_EXCEPTION(_vector_0xe00)
434 	EXCEPTION_HANDLE(_trap_handler)
435 
436 /* ---[ 0xf00: Reserved exception ]-------------------------------------- */
437     .org 0xf00
438 	UNHANDLED_EXCEPTION(_vector_0xf00)
439 
440 /* ---[ 0x1000: Reserved exception ]------------------------------------- */
441     .org 0x1000
442 	UNHANDLED_EXCEPTION(_vector_0x1000)
443 
444 /* ---[ 0x1100: Reserved exception ]------------------------------------- */
445     .org 0x1100
446 	UNHANDLED_EXCEPTION(_vector_0x1100)
447 
448 /* ---[ 0x1200: Reserved exception ]------------------------------------- */
449     .org 0x1200
450 	UNHANDLED_EXCEPTION(_vector_0x1200)
451 
452 /* ---[ 0x1300: Reserved exception ]------------------------------------- */
453     .org 0x1300
454 	UNHANDLED_EXCEPTION(_vector_0x1300)
455 
456 /* ---[ 0x1400: Reserved exception ]------------------------------------- */
457     .org 0x1400
458 	UNHANDLED_EXCEPTION(_vector_0x1400)
459 
460 /* ---[ 0x1500: Reserved exception ]------------------------------------- */
461     .org 0x1500
462 	UNHANDLED_EXCEPTION(_vector_0x1500)
463 
464 /* ---[ 0x1600: Reserved exception ]------------------------------------- */
465     .org 0x1600
466 	UNHANDLED_EXCEPTION(_vector_0x1600)
467 
468 /* ---[ 0x1700: Reserved exception ]------------------------------------- */
469     .org 0x1700
470 	UNHANDLED_EXCEPTION(_vector_0x1700)
471 
472 /* ---[ 0x1800: Reserved exception ]------------------------------------- */
473     .org 0x1800
474 	UNHANDLED_EXCEPTION(_vector_0x1800)
475 
476 /* ---[ 0x1900: Reserved exception ]------------------------------------- */
477     .org 0x1900
478 	UNHANDLED_EXCEPTION(_vector_0x1900)
479 
480 /* ---[ 0x1a00: Reserved exception ]------------------------------------- */
481     .org 0x1a00
482 	UNHANDLED_EXCEPTION(_vector_0x1a00)
483 
484 /* ---[ 0x1b00: Reserved exception ]------------------------------------- */
485     .org 0x1b00
486 	UNHANDLED_EXCEPTION(_vector_0x1b00)
487 
488 /* ---[ 0x1c00: Reserved exception ]------------------------------------- */
489     .org 0x1c00
490 	UNHANDLED_EXCEPTION(_vector_0x1c00)
491 
492 /* ---[ 0x1d00: Reserved exception ]------------------------------------- */
493     .org 0x1d00
494 	UNHANDLED_EXCEPTION(_vector_0x1d00)
495 
496 /* ---[ 0x1e00: Reserved exception ]------------------------------------- */
497     .org 0x1e00
498 	UNHANDLED_EXCEPTION(_vector_0x1e00)
499 
500 /* ---[ 0x1f00: Reserved exception ]------------------------------------- */
501     .org 0x1f00
502 	UNHANDLED_EXCEPTION(_vector_0x1f00)
503 
504     .org 0x2000
505 /* ===================================================[ kernel start ]=== */
506 
507 /*    .text*/
508 
509 /* This early stuff belongs in HEAD, but some of the functions below definitely
510  * don't... */
511 
512 	__HEAD
513 	.global _start
514 _start:
515 	/* Init r0 to zero as per spec */
516 	CLEAR_GPR(r0)
517 
518 	/* save kernel parameters */
519 	l.or	r25,r0,r3	/* pointer to fdt */
520 
521 	/*
522 	 * ensure a deterministic start
523 	 */
524 
525 	l.ori	r3,r0,0x1
526 	l.mtspr	r0,r3,SPR_SR
527 
528 	/*
529 	 * Start the TTCR as early as possible, so that the RNG can make use of
530 	 * measurements of boot time from the earliest opportunity. Especially
531 	 * important is that the TTCR does not return zero by the time we reach
532 	 * random_init().
533 	 */
534 	l.movhi r3,hi(SPR_TTMR_CR)
535 	l.mtspr r0,r3,SPR_TTMR
536 
537 	CLEAR_GPR(r1)
538 	CLEAR_GPR(r2)
539 	CLEAR_GPR(r3)
540 	CLEAR_GPR(r4)
541 	CLEAR_GPR(r5)
542 	CLEAR_GPR(r6)
543 	CLEAR_GPR(r7)
544 	CLEAR_GPR(r8)
545 	CLEAR_GPR(r9)
546 	CLEAR_GPR(r10)
547 	CLEAR_GPR(r11)
548 	CLEAR_GPR(r12)
549 	CLEAR_GPR(r13)
550 	CLEAR_GPR(r14)
551 	CLEAR_GPR(r15)
552 	CLEAR_GPR(r16)
553 	CLEAR_GPR(r17)
554 	CLEAR_GPR(r18)
555 	CLEAR_GPR(r19)
556 	CLEAR_GPR(r20)
557 	CLEAR_GPR(r21)
558 	CLEAR_GPR(r22)
559 	CLEAR_GPR(r23)
560 	CLEAR_GPR(r24)
561 	CLEAR_GPR(r26)
562 	CLEAR_GPR(r27)
563 	CLEAR_GPR(r28)
564 	CLEAR_GPR(r29)
565 	CLEAR_GPR(r30)
566 	CLEAR_GPR(r31)
567 
568 #ifdef CONFIG_SMP
569 	l.mfspr	r26,r0,SPR_COREID
570 	l.sfeq	r26,r0
571 	l.bnf	secondary_wait
572 	 l.nop
573 #endif
574 	/*
575 	 * set up initial ksp and current
576 	 */
577 	/* setup kernel stack */
578 	LOAD_SYMBOL_2_GPR(r1,init_thread_union + THREAD_SIZE)
579 	LOAD_SYMBOL_2_GPR(r10,init_thread_union)	// setup current
580 	tophys	(r31,r10)
581 	l.sw	TI_KSP(r31), r1
582 
583 	l.ori	r4,r0,0x0
584 
585 
586 	/*
587 	 * .data contains initialized data,
588 	 * .bss contains uninitialized data - clear it up
589 	 */
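	/*
	 * In rough C, the loop below does (illustrative sketch):
	 *
	 *   for (p = __pa(__bss_start); p <= __pa(_end); p += 4)
	 *           *(u32 *)p = 0;
	 *
	 * (the compare runs before the delay-slot increment, so the word at
	 *  __pa(_end) is cleared as well)
	 */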
590 clear_bss:
591 	LOAD_SYMBOL_2_GPR(r24, __bss_start)
592 	LOAD_SYMBOL_2_GPR(r26, _end)
593 	tophys(r28,r24)
594 	tophys(r30,r26)
595 	CLEAR_GPR(r24)
596 	CLEAR_GPR(r26)
597 1:
598 	l.sw    (0)(r28),r0
599 	l.sfltu r28,r30
600 	l.bf    1b
601 	l.addi  r28,r28,4
602 
603 enable_ic:
604 	l.jal	_ic_enable
605 	 l.nop
606 
607 enable_dc:
608 	l.jal	_dc_enable
609 	 l.nop
610 
611 flush_tlb:
612 	l.jal	_flush_tlb
613 	 l.nop
614 
615 /* The MMU needs to be enabled before or1k_early_setup is called */
616 
617 enable_mmu:
618 	/*
619 	 * enable dmmu & immu
620 	 * SR[DME] (bit 5) and SR[IME] (bit 6) of SR are set to 1
621 	 */
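	/* i.e., in rough C: mtspr(SPR_SR, mfspr(SPR_SR) | SPR_SR_DME | SPR_SR_IME) */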
622 	l.mfspr	r30,r0,SPR_SR
623 	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
624 	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
625 	l.or	r30,r30,r28
626 	l.mtspr	r0,r30,SPR_SR
627 	l.nop
628 	l.nop
629 	l.nop
630 	l.nop
631 	l.nop
632 	l.nop
633 	l.nop
634 	l.nop
635 	l.nop
636 	l.nop
637 	l.nop
638 	l.nop
639 	l.nop
640 	l.nop
641 	l.nop
642 	l.nop
643 
644 	// reset the simulation counters
645 	l.nop 5
646 
647 	/* check fdt header magic word */
648 	l.lwz	r3,0(r25)	/* load magic from fdt into r3 */
649 	l.movhi	r4,hi(OF_DT_HEADER)
650 	l.ori	r4,r4,lo(OF_DT_HEADER)
651 	l.sfeq	r3,r4
652 	l.bf	_fdt_found
653 	 l.nop
654 	/* magic number mismatch, set fdt pointer to null */
655 	l.or	r25,r0,r0
656 _fdt_found:
657 	/* pass fdt pointer to or1k_early_setup in r3 */
658 	l.or	r3,r0,r25
659 	LOAD_SYMBOL_2_GPR(r24, or1k_early_setup)
660 	l.jalr r24
661 	 l.nop
662 
663 clear_regs:
664 	/*
665 	 * clear all GPRS to increase determinism
666 	 */
667 	CLEAR_GPR(r2)
668 	CLEAR_GPR(r3)
669 	CLEAR_GPR(r4)
670 	CLEAR_GPR(r5)
671 	CLEAR_GPR(r6)
672 	CLEAR_GPR(r7)
673 	CLEAR_GPR(r8)
674 	CLEAR_GPR(r9)
675 	CLEAR_GPR(r11)
676 	CLEAR_GPR(r12)
677 	CLEAR_GPR(r13)
678 	CLEAR_GPR(r14)
679 	CLEAR_GPR(r15)
680 	CLEAR_GPR(r16)
681 	CLEAR_GPR(r17)
682 	CLEAR_GPR(r18)
683 	CLEAR_GPR(r19)
684 	CLEAR_GPR(r20)
685 	CLEAR_GPR(r21)
686 	CLEAR_GPR(r22)
687 	CLEAR_GPR(r23)
688 	CLEAR_GPR(r24)
689 	CLEAR_GPR(r25)
690 	CLEAR_GPR(r26)
691 	CLEAR_GPR(r27)
692 	CLEAR_GPR(r28)
693 	CLEAR_GPR(r29)
694 	CLEAR_GPR(r30)
695 	CLEAR_GPR(r31)
696 
697 jump_start_kernel:
698 	/*
699 	 * jump to kernel entry (start_kernel)
700 	 */
701 	LOAD_SYMBOL_2_GPR(r30, start_kernel)
702 	l.jr    r30
703 	 l.nop
704 
705 _flush_tlb:
706 	/*
707 	 *  I N V A L I D A T E   T L B   e n t r i e s
708 	 */
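	/*
	 * Roughly, in C (an illustrative sketch; 128 covers the largest
	 * possible number of sets):
	 *
	 *   for (i = 0; i < 128; i++) {
	 *           mtspr(SPR_DTLBMR_BASE(0) + i, 0);   // invalidate DTLB set i
	 *           mtspr(SPR_ITLBMR_BASE(0) + i, 0);   // invalidate ITLB set i
	 *   }
	 */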
709 	LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
710 	LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
711 	l.addi	r7,r0,128 /* Maximum number of sets */
712 1:
713 	l.mtspr	r5,r0,0x0
714 	l.mtspr	r6,r0,0x0
715 
716 	l.addi	r5,r5,1
717 	l.addi	r6,r6,1
718 	l.sfeq	r7,r0
719 	l.bnf	1b
720 	 l.addi	r7,r7,-1
721 
722 	l.jr	r9
723 	 l.nop
724 
725 #ifdef CONFIG_SMP
726 secondary_wait:
727 	/* Doze the cpu until we are asked to run */
728 	/* If we don't have power management, skip the doze */
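	/*
	 * The wait loop below, in rough C (an illustrative sketch):
	 *
	 *   do {
	 *           if (mfspr(SPR_UPR) & SPR_UPR_PMP) {
	 *                   mtspr(SPR_EVBAR, __pa(_secondary_evbar)); // stub handlers
	 *                   mtspr(SPR_SR, mfspr(SPR_SR) | SPR_SR_IEE);
	 *                   mtspr(SPR_PICMR, mfspr(SPR_PICMR) | 0xffff);
	 *                   mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME); // doze
	 *                   mtspr(SPR_EVBAR, 0);                         // on wakeup
	 *           }
	 *   } while (secondary_release != coreid);
	 */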
729 	l.mfspr r25,r0,SPR_UPR
730 	l.andi  r25,r25,SPR_UPR_PMP
731 	l.sfeq  r25,r0
732 	l.bf	secondary_check_release
733 	 l.nop
734 
735 	/* Setup special secondary exception handler */
736 	LOAD_SYMBOL_2_GPR(r3, _secondary_evbar)
737 	tophys(r25,r3)
738 	l.mtspr	r0,r25,SPR_EVBAR
739 
740 	/* Enable Interrupts */
741 	l.mfspr	r25,r0,SPR_SR
742 	l.ori	r25,r25,SPR_SR_IEE
743 	l.mtspr	r0,r25,SPR_SR
744 
745 	/* Unmask interrupts */
746 	l.mfspr r25,r0,SPR_PICMR
747 	l.ori   r25,r25,0xffff
748 	l.mtspr	r0,r25,SPR_PICMR
749 
750 	/* Doze */
751 	l.mfspr r25,r0,SPR_PMR
752 	LOAD_SYMBOL_2_GPR(r3, SPR_PMR_DME)
753 	l.or    r25,r25,r3
754 	l.mtspr r0,r25,SPR_PMR
755 
756 	/* Wakeup - Restore exception handler */
757 	l.mtspr	r0,r0,SPR_EVBAR
758 
759 secondary_check_release:
760 	/*
761 	 * Check if we actually got the release signal, if not go-back to
762 	 * sleep.
763 	 */
764 	l.mfspr	r25,r0,SPR_COREID
765 	LOAD_SYMBOL_2_GPR(r3, secondary_release)
766 	tophys(r4, r3)
767 	l.lwz	r3,0(r4)
768 	l.sfeq	r25,r3
769 	l.bnf	secondary_wait
770 	 l.nop
771 	/* fall through to secondary_init */
772 
773 secondary_init:
774 	/*
775 	 * set up initial ksp and current
776 	 */
777 	LOAD_SYMBOL_2_GPR(r10, secondary_thread_info)
778 	tophys	(r30,r10)
779 	l.lwz	r10,0(r30)
780 	l.addi	r1,r10,THREAD_SIZE
781 	tophys	(r30,r10)
782 	l.sw	TI_KSP(r30),r1
783 
784 	l.jal	_ic_enable
785 	 l.nop
786 
787 	l.jal	_dc_enable
788 	 l.nop
789 
790 	l.jal	_flush_tlb
791 	 l.nop
792 
793 	/*
794 	 * enable dmmu & immu
795 	 */
796 	l.mfspr	r30,r0,SPR_SR
797 	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
798 	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
799 	l.or	r30,r30,r28
800 	/*
801 	 * This is a bit tricky, we need to switch over from physical addresses
802 	 * to virtual addresses on the fly.
803 	 * To do that, we first set up ESR with the IME and DME bits set.
804 	 * Then EPCR is set to secondary_start and then a l.rfe is issued to
805 	 * "jump" to that.
806 	 */
807 	l.mtspr	r0,r30,SPR_ESR_BASE
808 	LOAD_SYMBOL_2_GPR(r30, secondary_start)
809 	l.mtspr	r0,r30,SPR_EPCR_BASE
810 	l.rfe
811 
812 secondary_start:
813 	LOAD_SYMBOL_2_GPR(r30, secondary_start_kernel)
814 	l.jr    r30
815 	 l.nop
816 
817 #endif
818 
819 /* ========================================[ cache ]=== */
820 
821 	/* alignment here so that memory offsets don't change depending on
822 	 * how the memory controller is defined
823 	 */
824 	.align 0x2000
825 
826 _ic_enable:
827 	/* Check if IC present and skip enabling otherwise */
828 	l.mfspr r24,r0,SPR_UPR
829 	l.andi  r26,r24,SPR_UPR_ICP
830 	l.sfeq  r26,r0
831 	l.bf	9f
832 	l.nop
833 
834 	/* Disable IC */
835 	l.mfspr r6,r0,SPR_SR
836 	l.addi  r5,r0,-1
837 	l.xori  r5,r5,SPR_SR_ICE
838 	l.and   r5,r6,r5
839 	l.mtspr r0,r5,SPR_SR
840 
841 	/* Establish cache block size
842 	   If BS=0, 16;
843 	   If BS=1, 32;
844 	   r14 contains the block size
845 	*/
846 	l.mfspr r24,r0,SPR_ICCFGR
847 	l.andi	r26,r24,SPR_ICCFGR_CBS
848 	l.srli	r28,r26,7
849 	l.ori	r30,r0,16
850 	l.sll	r14,r30,r28
851 
852 	/* Establish number of cache sets
853 	   r16 contains number of cache sets
854 	   r28 contains log(# of cache sets)
855 	*/
856 	l.andi  r26,r24,SPR_ICCFGR_NCS
857 	l.srli 	r28,r26,3
858 	l.ori   r30,r0,1
859 	l.sll   r16,r30,r28
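	/*
	 * The two computations above, in rough C (an illustrative sketch):
	 *
	 *   block_size = 16 << ((mfspr(SPR_ICCFGR) & SPR_ICCFGR_CBS) >> 7);
	 *   nsets      = 1  << ((mfspr(SPR_ICCFGR) & SPR_ICCFGR_NCS) >> 3);
	 *
	 * The invalidate loop below then writes every address from 0 to
	 * nsets * block_size to SPR_ICBIR, in block_size steps.
	 */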
860 
861 	/* Invalidate IC */
862 	l.addi  r6,r0,0
863 	l.sll   r5,r14,r28
864 //        l.mul   r5,r14,r16
865 //	l.trap  1
866 //	l.addi  r5,r0,IC_SIZE
867 1:
868 	l.mtspr r0,r6,SPR_ICBIR
869 	l.sfne  r6,r5
870 	l.bf    1b
871 	l.add   r6,r6,r14
872  //       l.addi   r6,r6,IC_LINE
873 
874 	/* Enable IC */
875 	l.mfspr r6,r0,SPR_SR
876 	l.ori   r6,r6,SPR_SR_ICE
877 	l.mtspr r0,r6,SPR_SR
878 	l.nop
879 	l.nop
880 	l.nop
881 	l.nop
882 	l.nop
883 	l.nop
884 	l.nop
885 	l.nop
886 	l.nop
887 	l.nop
888 9:
889 	l.jr    r9
890 	l.nop
891 
892 _dc_enable:
893 	/* Check if DC present and skip enabling otherwise */
894 	l.mfspr r24,r0,SPR_UPR
895 	l.andi  r26,r24,SPR_UPR_DCP
896 	l.sfeq  r26,r0
897 	l.bf	9f
898 	l.nop
899 
900 	/* Disable DC */
901 	l.mfspr r6,r0,SPR_SR
902 	l.addi  r5,r0,-1
903 	l.xori  r5,r5,SPR_SR_DCE
904 	l.and   r5,r6,r5
905 	l.mtspr r0,r5,SPR_SR
906 
907 	/* Establish cache block size
908 	   If BS=0, 16;
909 	   If BS=1, 32;
910 	   r14 contains the block size
911 	*/
912 	l.mfspr r24,r0,SPR_DCCFGR
913 	l.andi	r26,r24,SPR_DCCFGR_CBS
914 	l.srli	r28,r26,7
915 	l.ori	r30,r0,16
916 	l.sll	r14,r30,r28
917 
918 	/* Establish number of cache sets
919 	   r16 contains number of cache sets
920 	   r28 contains log(# of cache sets)
921 	*/
922 	l.andi  r26,r24,SPR_DCCFGR_NCS
923 	l.srli 	r28,r26,3
924 	l.ori   r30,r0,1
925 	l.sll   r16,r30,r28
926 
927 	/* Invalidate DC */
928 	l.addi  r6,r0,0
929 	l.sll   r5,r14,r28
930 1:
931 	l.mtspr r0,r6,SPR_DCBIR
932 	l.sfne  r6,r5
933 	l.bf    1b
934 	l.add   r6,r6,r14
935 
936 	/* Enable DC */
937 	l.mfspr r6,r0,SPR_SR
938 	l.ori   r6,r6,SPR_SR_DCE
939 	l.mtspr r0,r6,SPR_SR
940 9:
941 	l.jr    r9
942 	l.nop
943 
944 /* ===============================================[ page table masks ]=== */
945 
946 #define DTLB_UP_CONVERT_MASK  0x3fa
947 #define ITLB_UP_CONVERT_MASK  0x3a
948 
949 /* for SMP we'd have (this is a bit subtle, CC must always be set
950  * for SMP, but since we have _PAGE_PRESENT bit always defined
951  * we can just modify the mask)
952  */
953 #define DTLB_SMP_CONVERT_MASK  0x3fb
954 #define ITLB_SMP_CONVERT_MASK  0x3b
955 
956 /* ---[ boot dtlb miss handler ]----------------------------------------- */
957 
958 boot_dtlb_miss_handler:
959 
960 /* mask for DTLB_MR register: - (0) sets V (valid) bit,
961  *                            - (31-12) sets bits belonging to VPN (31-12)
962  */
963 #define DTLB_MR_MASK 0xfffff001
964 
965 /* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
966  *			      - (4) sets A (access) bit,
967  *                            - (5) sets D (dirty) bit,
968  *                            - (8) sets SRE (superuser read) bit
969  *                            - (9) sets SWE (superuser write) bit
970  *                            - (31-12) sets bits belonging to VPN (31-12)
971  */
972 #define DTLB_TR_MASK 0xfffff332
973 
974 /* These are for masking out the VPN/PPN value from the MR/TR registers...
975  * it's not the same as the PFN */
976 #define VPN_MASK 0xfffff000
977 #define PPN_MASK 0xfffff000
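/*
 * What the handler below installs, in rough C (an illustrative sketch;
 * EA <= 0xbfffffff is mapped 1:1, kernel addresses are offset by KERNELBASE):
 *
 *   set = (ea >> 13) & (dmmu_num_sets - 1);
 *   pa  = (ea <= 0xbfffffff) ? ea : ea - KERNELBASE;
 *   mtspr(SPR_DTLBMR_BASE(0) + set, (ea & VPN_MASK) | 0x001);  // DTLB_MR_MASK bits
 *   mtspr(SPR_DTLBTR_BASE(0) + set, (pa & PPN_MASK) | 0x332);  // DTLB_TR_MASK bits
 */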
978 
979 
980 	EXCEPTION_STORE_GPR6
981 
982 #if 0
983 	l.mfspr r6,r0,SPR_ESR_BASE	   //
984 	l.andi  r6,r6,SPR_SR_SM            // are we in kernel mode ?
985 	l.sfeqi r6,0                       // r6 == 0x1 --> SM
986 	l.bf    exit_with_no_dtranslation  //
987 	l.nop
988 #endif
989 
990 	/* this could be optimized by moving storing of
991 	 * non r6 registers here, and jumping r6 restore
992 	 * if not in supervisor mode
993 	 */
994 
995 	EXCEPTION_STORE_GPR2
996 	EXCEPTION_STORE_GPR3
997 	EXCEPTION_STORE_GPR4
998 	EXCEPTION_STORE_GPR5
999 
1000 	l.mfspr r4,r0,SPR_EEAR_BASE        // get the offending EA
1001 
1002 immediate_translation:
1003 	CLEAR_GPR(r6)
1004 
1005 	l.srli	r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb), NOT VPN size (4Kb))
1006 
1007 	l.mfspr r6, r0, SPR_DMMUCFGR
1008 	l.andi	r6, r6, SPR_DMMUCFGR_NTS
1009 	l.srli	r6, r6, SPR_DMMUCFGR_NTS_OFF
1010 	l.ori	r5, r0, 0x1
1011 	l.sll	r5, r5, r6 	// r5 = number DMMU sets
1012 	l.addi	r6, r5, -1  	// r6 = nsets mask
1013 	l.and	r2, r3, r6	// r2 <- r3 % NSETS_MASK
1014 
1015 	l.or    r6,r6,r4                   // r6 <- r4
1016 	l.ori   r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
1017 	l.movhi r5,hi(DTLB_MR_MASK)        // r5 <- ffff:0000.x000
1018 	l.ori   r5,r5,lo(DTLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
1019 	l.and   r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have DTLBMR entry
1020 	l.mtspr r2,r5,SPR_DTLBMR_BASE(0)   // set DTLBMR
1021 
1022 	/* set up DTLB with no translation for EA <= 0xbfffffff */
1023 	LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
1024 	l.sfgeu  r6,r4                     // flag if r6 >= r4 (if 0xbfffffff >= EA)
1025 	l.bf     1f                        // goto out
1026 	l.and    r3,r4,r4                  // delay slot :: r3 <- r4 (if flag==1)
1027 
1028 	tophys(r3,r4)                      // r3 <- PA
1029 1:
1030 	l.ori   r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
1031 	l.movhi r5,hi(DTLB_TR_MASK)        // r5 <- ffff:0000.x000
1032 	l.ori   r5,r5,lo(DTLB_TR_MASK)     // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK
1033 	l.and   r5,r5,r3                   // r5 <- PPN :PPN .x330 - we have DTLBTR entry
1034 	l.mtspr r2,r5,SPR_DTLBTR_BASE(0)   // set DTLBTR
1035 
1036 	EXCEPTION_LOAD_GPR6
1037 	EXCEPTION_LOAD_GPR5
1038 	EXCEPTION_LOAD_GPR4
1039 	EXCEPTION_LOAD_GPR3
1040 	EXCEPTION_LOAD_GPR2
1041 
1042 	l.rfe                              // SR <- ESR, PC <- EPC
1043 
1044 exit_with_no_dtranslation:
1045 	/* EA out of memory or not in supervisor mode */
1046 	EXCEPTION_LOAD_GPR6
1047 	EXCEPTION_LOAD_GPR4
1048 	l.j	_dispatch_bus_fault
1049 
1050 /* ---[ boot itlb miss handler ]----------------------------------------- */
1051 
1052 boot_itlb_miss_handler:
1053 
1054 /* mask for ITLB_MR register: - sets V (valid) bit,
1055  *                            - sets bits belonging to VPN (31-12)
1056  */
1057 #define ITLB_MR_MASK 0xfffff001
1058 
1059 /* mask for ITLB_TR register: - sets A (access) bit,
1060  *                            - sets SXE (superuser execute) bit
1061  *                            - sets bits belonging to VPN (31-12)
1062  */
1063 #define ITLB_TR_MASK 0xfffff050
1064 
1065 /*
1066 #define VPN_MASK 0xffffe000
1067 #define PPN_MASK 0xffffe000
1068 */
1069 
1070 
1071 
1072 	EXCEPTION_STORE_GPR2
1073 	EXCEPTION_STORE_GPR3
1074 	EXCEPTION_STORE_GPR4
1075 	EXCEPTION_STORE_GPR5
1076 	EXCEPTION_STORE_GPR6
1077 
1078 #if 0
1079 	l.mfspr r6,r0,SPR_ESR_BASE         //
1080 	l.andi  r6,r6,SPR_SR_SM            // are we in kernel mode ?
1081 	l.sfeqi r6,0                       // r6 == 0x1 --> SM
1082 	l.bf    exit_with_no_itranslation
1083 	l.nop
1084 #endif
1085 
1086 
1087 	l.mfspr r4,r0,SPR_EEAR_BASE        // get the offending EA
1088 
1089 earlyearly:
1090 	CLEAR_GPR(r6)
1091 
1092 	l.srli  r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb), NOT VPN size (4Kb))
1093 
1094 	l.mfspr r6, r0, SPR_IMMUCFGR
1095 	l.andi	r6, r6, SPR_IMMUCFGR_NTS
1096 	l.srli	r6, r6, SPR_IMMUCFGR_NTS_OFF
1097 	l.ori	r5, r0, 0x1
1098 	l.sll	r5, r5, r6 	// r5 = number IMMU sets from IMMUCFGR
1099 	l.addi	r6, r5, -1  	// r6 = nsets mask
1100 	l.and	r2, r3, r6	// r2 <- r3 % NSETS_MASK
1101 
1102 	l.or    r6,r6,r4                   // r6 <- r4
1103 	l.ori   r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
1104 	l.movhi r5,hi(ITLB_MR_MASK)        // r5 <- ffff:0000.x000
1105 	l.ori   r5,r5,lo(ITLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
1106 	l.and   r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have ITLBMR entry
1107 	l.mtspr r2,r5,SPR_ITLBMR_BASE(0)   // set ITLBMR
1108 
1109 	/*
1110 	 * set up ITLB with no translation for EA <= 0x0fffffff
1111 	 *
1112 	 * we need this for head.S mapping (EA = PA). if we move all functions
1113 	 * which run with mmu enabled into entry.S, we might be able to eliminate this.
1114 	 *
1115 	 */
1116 	LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
1117 	l.sfgeu  r6,r4                     // flag if r6 >= r4 (if 0x0fffffff >= EA)
1118 	l.bf     1f                        // goto out
1119 	l.and    r3,r4,r4                  // delay slot :: r3 <- r4 (if flag==1)
1120 
1121 	tophys(r3,r4)                      // r3 <- PA
1122 1:
1123 	l.ori   r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
1124 	l.movhi r5,hi(ITLB_TR_MASK)        // r5 <- ffff:0000.x000
1125 	l.ori   r5,r5,lo(ITLB_TR_MASK)     // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK
1126 	l.and   r5,r5,r3                   // r5 <- PPN :PPN .x050 - we have ITLBTR entry
1127 	l.mtspr r2,r5,SPR_ITLBTR_BASE(0)   // set ITLBTR
1128 
1129 	EXCEPTION_LOAD_GPR6
1130 	EXCEPTION_LOAD_GPR5
1131 	EXCEPTION_LOAD_GPR4
1132 	EXCEPTION_LOAD_GPR3
1133 	EXCEPTION_LOAD_GPR2
1134 
1135 	l.rfe                              // SR <- ESR, PC <- EPC
1136 
1137 exit_with_no_itranslation:
1138 	EXCEPTION_LOAD_GPR4
1139 	EXCEPTION_LOAD_GPR6
1140 	l.j    _dispatch_bus_fault
1141 	l.nop
1142 
1143 /* ====================================================================== */
1144 /*
1145  * Stuff below here shouldn't go into .head section... maybe this stuff
1146  * can be moved to entry.S ???
1147  */
1148 
1149 /* ==============================================[ DTLB miss handler ]=== */
1150 
1151 /*
1152  * Comments:
1153  *   Exception handlers are entered with MMU off so the following handler
1154  *   needs to use physical addressing
1155  *
1156  */
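/*
 * A rough C sketch of the software refill below (illustrative only;
 * PAGE_SHIFT is 13 on or1k, so one pgd entry covers 16MB and a pte
 * table holds 0x800 entries):
 *
 *   pmd = *(u32 *)__pa(&current_pgd[cpu][ea >> 24]);
 *   if (pmd == 0)
 *           goto page_fault;
 *   pte = *(u32 *)((pmd & PAGE_MASK) + (((ea >> 13) & 0x7ff) << 2));
 *   if (!(pte & 0x1))                        // _PAGE_PRESENT
 *           goto page_fault;
 *   set = (ea >> 13) & (dmmu_num_sets - 1);
 *   mtspr(SPR_DTLBTR_BASE(0) + set, pte & (PAGE_MASK | DTLB_UP_CONVERT_MASK));
 *   mtspr(SPR_DTLBMR_BASE(0) + set, (ea & PAGE_MASK) | 0x1);   // valid
 */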
1157 
1158 	.text
1159 ENTRY(dtlb_miss_handler)
1160 	EXCEPTION_STORE_GPR2
1161 	EXCEPTION_STORE_GPR3
1162 	EXCEPTION_STORE_GPR4
1163 	/*
1164 	 * get EA of the miss
1165 	 */
1166 	l.mfspr	r2,r0,SPR_EEAR_BASE
1167 	/*
1168 	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
1169 	 */
1170 	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
1171 	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
1172 	l.slli	r4,r4,0x2		// to get address << 2
1173 	l.add	r3,r4,r3		// r4 is pgd_index(daddr)
1174 	/*
1175 	 * if (pmd_none(*pmd))
1176 	 *   goto pmd_none:
1177 	 */
1178 	tophys	(r4,r3)
1179 	l.lwz	r3,0x0(r4)		// get *pmd value
1180 	l.sfne	r3,r0
1181 	l.bnf	d_pmd_none
1182 	 l.addi	r3,r0,0xffffe000	// PAGE_MASK
1183 
1184 d_pmd_good:
1185 	/*
1186 	 * pte = *pte_offset(pmd, daddr);
1187 	 */
1188 	l.lwz	r4,0x0(r4)		// get **pmd value
1189 	l.and	r4,r4,r3		// & PAGE_MASK
1190 	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
1191 	l.andi	r3,r2,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
1192 	l.slli	r3,r3,0x2		// to get address << 2
1193 	l.add	r3,r3,r4
1194 	l.lwz	r3,0x0(r3)		// this is pte at last
1195 	/*
1196 	 * if (!pte_present(pte))
1197 	 */
1198 	l.andi	r4,r3,0x1
1199 	l.sfne	r4,r0			// is pte present
1200 	l.bnf	d_pte_not_present
1201 	l.addi	r4,r0,0xffffe3fa	// PAGE_MASK | DTLB_UP_CONVERT_MASK
1202 	/*
1203 	 * fill DTLB TR register
1204 	 */
1205 	l.and	r4,r3,r4		// apply the mask
1206 	// Determine number of DMMU sets
1207 	l.mfspr r2, r0, SPR_DMMUCFGR
1208 	l.andi	r2, r2, SPR_DMMUCFGR_NTS
1209 	l.srli	r2, r2, SPR_DMMUCFGR_NTS_OFF
1210 	l.ori	r3, r0, 0x1
1211 	l.sll	r3, r3, r2 	// r3 = number DMMU sets DMMUCFGR
1212 	l.addi	r2, r3, -1  	// r2 = nsets mask
1213 	l.mfspr	r3, r0, SPR_EEAR_BASE
1214 	l.srli	r3, r3, 0xd	// >> PAGE_SHIFT
1215 	l.and	r2, r3, r2	// calc offset:	 & (NUM_TLB_ENTRIES-1)
1216 	                                                   //NUM_TLB_ENTRIES
1217 	l.mtspr	r2,r4,SPR_DTLBTR_BASE(0)
1218 	/*
1219 	 * fill DTLB MR register
1220 	 */
1221 	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
1222 	l.ori	r4,r3,0x1		// set hardware valid bit: DTLBMR entry
1223 	l.mtspr	r2,r4,SPR_DTLBMR_BASE(0)
1224 
1225 	EXCEPTION_LOAD_GPR2
1226 	EXCEPTION_LOAD_GPR3
1227 	EXCEPTION_LOAD_GPR4
1228 	l.rfe
1229 d_pmd_none:
1230 d_pte_not_present:
1231 	EXCEPTION_LOAD_GPR2
1232 	EXCEPTION_LOAD_GPR3
1233 	EXCEPTION_LOAD_GPR4
1234 	EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
1235 
1236 /* ==============================================[ ITLB miss handler ]=== */
1237 ENTRY(itlb_miss_handler)
1238 	EXCEPTION_STORE_GPR2
1239 	EXCEPTION_STORE_GPR3
1240 	EXCEPTION_STORE_GPR4
1241 	/*
1242 	 * get EA of the miss
1243 	 */
1244 	l.mfspr	r2,r0,SPR_EEAR_BASE
1245 
1246 	/*
1247 	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
1248 	 *
1249 	 */
1250 	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
1251 	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
1252 	l.slli	r4,r4,0x2		// to get address << 2
1253 	l.add	r3,r4,r3		// r4 is pgd_index(daddr)
1254 	/*
1255 	 * if (pmd_none(*pmd))
1256 	 *   goto pmd_none:
1257 	 */
1258 	tophys	(r4,r3)
1259 	l.lwz	r3,0x0(r4)		// get *pmd value
1260 	l.sfne	r3,r0
1261 	l.bnf	i_pmd_none
1262 	 l.addi	r3,r0,0xffffe000	// PAGE_MASK
1263 
1264 i_pmd_good:
1265 	/*
1266 	 * pte = *pte_offset(pmd, iaddr);
1267 	 *
1268 	 */
1269 	l.lwz	r4,0x0(r4)		// get **pmd value
1270 	l.and	r4,r4,r3		// & PAGE_MASK
1271 	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
1272 	l.andi	r3,r2,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
1273 	l.slli	r3,r3,0x2		// to get address << 2
1274 	l.add	r3,r3,r4
1275 	l.lwz	r3,0x0(r3)		// this is pte at last
1276 	/*
1277 	 * if (!pte_present(pte))
1278 	 *
1279 	 */
1280 	l.andi	r4,r3,0x1
1281 	l.sfne	r4,r0			// is pte present
1282 	l.bnf	i_pte_not_present
1283 	 l.addi	r4,r0,0xffffe03a	// PAGE_MASK | ITLB_UP_CONVERT_MASK
1284 	/*
1285 	 * fill ITLB TR register
1286 	 */
1287 	l.and	r4,r3,r4		// apply the mask
1288 	l.andi	r3,r3,0x7c0		// _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE |  _PAGE_URE | _PAGE_UWE
1289 	l.sfeq	r3,r0
1290 	l.bf	itlb_tr_fill //_workaround
1291 	// Determine number of IMMU sets
1292 	l.mfspr r2, r0, SPR_IMMUCFGR
1293 	l.andi	r2, r2, SPR_IMMUCFGR_NTS
1294 	l.srli	r2, r2, SPR_IMMUCFGR_NTS_OFF
1295 	l.ori	r3, r0, 0x1
1296 	l.sll	r3, r3, r2 	// r3 = number IMMU sets IMMUCFGR
1297 	l.addi	r2, r3, -1  	// r2 = nsets mask
1298 	l.mfspr	r3, r0, SPR_EEAR_BASE
1299 	l.srli	r3, r3, 0xd	// >> PAGE_SHIFT
1300 	l.and	r2, r3, r2	// calc offset:	 & (NUM_TLB_ENTRIES-1)
1301 
1302 /*
1303  * __PHX__ :: fixme
1304  * we should not just blindly set executable flags,
1305  * but it does help with ping. The clean way would be to find out
1306  * why the stack doesn't have execute permissions, and fix that.
1307  */
1308 
1309 itlb_tr_fill_workaround:
1310 	l.ori	r4,r4,0xc0		// | (SPR_ITLBTR_UXE | ITLBTR_SXE)
1311 itlb_tr_fill:
1312 	l.mtspr	r2,r4,SPR_ITLBTR_BASE(0)
1313 	/*
1314 	 * fill ITLB MR register
1315 	 */
1316 	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
1317 	l.ori	r4,r3,0x1		// set hardware valid bit: ITLBMR entry
1318 	l.mtspr	r2,r4,SPR_ITLBMR_BASE(0)
1319 
1320 	EXCEPTION_LOAD_GPR2
1321 	EXCEPTION_LOAD_GPR3
1322 	EXCEPTION_LOAD_GPR4
1323 	l.rfe
1324 
1325 i_pmd_none:
1326 i_pte_not_present:
1327 	EXCEPTION_LOAD_GPR2
1328 	EXCEPTION_LOAD_GPR3
1329 	EXCEPTION_LOAD_GPR4
1330 	EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
1331 
1332 /* ==============================================[ boot tlb handlers ]=== */
1333 
1334 
1335 /* =================================================[ debugging aids ]=== */
1336 
1337 /*
1338  * DESC: Prints ASCII character stored in r7
1339  *
1340  * PRMS: r7	- a 32-bit value with an ASCII character in the first byte
1341  *		position.
1342  *
1343  * PREQ: The UART at UART_BASE_ADD has to be initialized
1344  *
1345  * POST: the following registers are used internally but restored:
1346  * 	 r4	- to store UART_BASE_ADD
1347  *	 r5	- for loading OFF_TXFULL / THRE,TEMT
1348  *	 r6	- for storing bitmask (SERIAL_8250)
1349  */
1350 ENTRY(_emergency_putc)
1351 	EMERGENCY_PRINT_STORE_GPR4
1352 	EMERGENCY_PRINT_STORE_GPR5
1353 	EMERGENCY_PRINT_STORE_GPR6
1354 
1355 	l.movhi r4,hi(UART_BASE_ADD)
1356 	l.ori	r4,r4,lo(UART_BASE_ADD)
1357 
1358 #if defined(CONFIG_SERIAL_LITEUART)
1359 	/* Check OFF_TXFULL status */
1360 1:      l.lwz	r5,4(r4)
1361 	l.andi	r5,r5,0xff
1362 	l.sfnei	r5,0
1363 	l.bf	1b
1364 	 l.nop
1365 
1366 	/* Write character */
1367 	l.andi	r7,r7,0xff
1368 	l.sw	0(r4),r7
1369 #elif defined(CONFIG_SERIAL_8250)
1370 	/* Check UART LSR THRE (hold) bit */
1371 	l.addi  r6,r0,0x20
1372 1:      l.lbz   r5,5(r4)
1373 	l.andi  r5,r5,0x20
1374 	l.sfeq  r5,r6
1375 	l.bnf   1b
1376 	 l.nop
1377 
1378 	/* Write character */
1379 	l.sb    0(r4),r7
1380 
1381 	/* Check UART LSR THRE|TEMT (hold, empty) bits */
1382 	l.addi  r6,r0,0x60
1383 1:      l.lbz   r5,5(r4)
1384 	l.andi  r5,r5,0x60
1385 	l.sfeq  r5,r6
1386 	l.bnf   1b
1387 	 l.nop
1388 #endif
1389 	EMERGENCY_PRINT_LOAD_GPR6
1390 	EMERGENCY_PRINT_LOAD_GPR5
1391 	EMERGENCY_PRINT_LOAD_GPR4
1392 	l.jr	r9
1393 	 l.nop
1394 
1395 /*
1396  * DSCR: prints a string referenced by r3.
1397  *
1398  * PRMS: r3     	- address of the first character of the
1399  *			null-terminated string to be printed
1400  *
1401  * PREQ: UART at UART_BASE_ADD has to be initialized
1402  *
1403  * POST: caller should be aware that r3, r9 are changed
1404  */
1405 ENTRY(_emergency_print)
1406 	EMERGENCY_PRINT_STORE_GPR7
1407 	EMERGENCY_PRINT_STORE_GPR9
1408 
1409 	/* Load character to r7, check for null terminator */
1410 2:	l.lbz	r7,0(r3)
1411 	l.sfeqi	r7,0x0
1412 	l.bf	9f
1413 	 l.nop
1414 
1415 	l.jal	_emergency_putc
1416 	 l.nop
1417 
1418 	/* next character */
1419 	l.j	2b
1420 	 l.addi	r3,r3,0x1
1421 
1422 9:
1423 	EMERGENCY_PRINT_LOAD_GPR9
1424 	EMERGENCY_PRINT_LOAD_GPR7
1425 	l.jr	r9
1426 	 l.nop
1427 
1428 /*
1429  * DSCR: prints a number in r3 in hex.
1430  *
1431  * PRMS: r3     	- a 32-bit unsigned integer
1432  *
1433  * PREQ: UART at UART_BASE_ADD has to be initialized
1434  *
1435  * POST: caller should be aware that r3, r9 are changed
1436  */
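/*
 * Roughly, in C (an illustrative sketch of the two loops below):
 *
 *   for (shift = 28; shift > 4 && ((val >> shift) & 0xf) == 0; shift -= 4)
 *           ;                                // strip leading zero nibbles
 *   for (; shift >= 0; shift -= 4) {
 *           int digit = (val >> shift) & 0xf;
 *           putc(digit > 9 ? digit - 10 + 'a' : digit + '0');
 *   }
 */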
1437 ENTRY(_emergency_print_nr)
1438 	EMERGENCY_PRINT_STORE_GPR7
1439 	EMERGENCY_PRINT_STORE_GPR8
1440 	EMERGENCY_PRINT_STORE_GPR9
1441 
1442 	l.addi	r8,r0,32		// shift register
1443 
1444 1:	/* remove leading zeros */
1445 	l.addi	r8,r8,-0x4
1446 	l.srl	r7,r3,r8
1447 	l.andi	r7,r7,0xf
1448 
1449 	/* don't skip the last zero if number == 0x0 */
1450 	l.sfeqi	r8,0x4
1451 	l.bf	2f
1452 	 l.nop
1453 
1454 	l.sfeq	r7,r0
1455 	l.bf	1b
1456 	 l.nop
1457 
1458 2:
1459 	l.srl	r7,r3,r8
1460 
1461 	l.andi	r7,r7,0xf
1462 	l.sflts	r8,r0
1463 	l.bf	9f
1464 
1465 	/* Numbers greater than 9 translate to a-f */
1466 	l.sfgtui r7,0x9
1467 	l.bnf	8f
1468 	 l.nop
1469 	l.addi	r7,r7,0x27
1470 
1471 	/* Convert to ascii and output character */
1472 8:	l.jal	_emergency_putc
1473 	 l.addi	r7,r7,0x30
1474 
1475 	/* next character */
1476 	l.j	2b
1477 	l.addi	r8,r8,-0x4
1478 
1479 9:
1480 	EMERGENCY_PRINT_LOAD_GPR9
1481 	EMERGENCY_PRINT_LOAD_GPR8
1482 	EMERGENCY_PRINT_LOAD_GPR7
1483 	l.jr	r9
1484 	 l.nop
1485 
1486 /*
1487  * This should be used for debugging only.
1488  * It messes up the Linux early serial output
1489  * somehow, so use it sparingly and essentially
1490  * only if you need to debug something that goes wrong
1491  * before Linux gets the early serial going.
1492  *
1493  * Furthermore, you'll have to make sure you set the
1494  * UART_DIVISOR correctly according to the system
1495  * clock rate.
1496  *
1497  *
1498  */
1499 
1500 
1501 
1502 #define SYS_CLK            20000000
1503 //#define SYS_CLK            1843200
1504 #define OR32_CONSOLE_BAUD  115200
1505 #define UART_DIVISOR       SYS_CLK/(16*OR32_CONSOLE_BAUD)
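/*
 * The 8250 init below, in rough C (an illustrative sketch using the standard
 * 16550 register offsets from <linux/serial_reg.h>):
 *
 *   writeb(0x07, base + UART_FCR);               // enable and clear FIFOs
 *   writeb(0x00, base + UART_IER);               // no interrupts
 *   writeb(0x03, base + UART_LCR);               // 8n1
 *   lcr = readb(base + UART_LCR);
 *   writeb(lcr | UART_LCR_DLAB, base + UART_LCR);
 *   writeb((UART_DIVISOR >> 8) & 0xff, base + UART_DLM);
 *   writeb(UART_DIVISOR & 0xff, base + UART_DLL);
 *   writeb(lcr, base + UART_LCR);                // back to data registers
 */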
1506 
1507 ENTRY(_early_uart_init)
1508 	l.movhi	r3,hi(UART_BASE_ADD)
1509 	l.ori	r3,r3,lo(UART_BASE_ADD)
1510 
1511 #if defined(CONFIG_SERIAL_8250)
1512 	l.addi	r4,r0,0x7
1513 	l.sb	0x2(r3),r4
1514 
1515 	l.addi	r4,r0,0x0
1516 	l.sb	0x1(r3),r4
1517 
1518 	l.addi	r4,r0,0x3
1519 	l.sb	0x3(r3),r4
1520 
1521 	l.lbz	r5,3(r3)
1522 	l.ori	r4,r5,0x80
1523 	l.sb	0x3(r3),r4
1524 	l.addi	r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
1525 	l.sb	UART_DLM(r3),r4
1526 	l.addi  r4,r0,((UART_DIVISOR) & 0x000000ff)
1527 	l.sb	UART_DLL(r3),r4
1528 	l.sb	0x3(r3),r5
1529 #endif
1530 
1531 	l.jr	r9
1532 	 l.nop
1533 
1534 	.align	0x1000
1535 	.global _secondary_evbar
1536 _secondary_evbar:
1537 
1538 	.space 0x800
1539 	/* Just disable interrupts and return */
1540 	l.ori	r3,r0,SPR_SR_SM
1541 	l.mtspr	r0,r3,SPR_ESR_BASE
1542 	l.rfe
1543 
1544 
1545 	.section .rodata
1546 _string_unhandled_exception:
1547 	.string "\r\nRunarunaround: Unhandled exception 0x\0"
1548 
1549 _string_epc_prefix:
1550 	.string ": EPC=0x\0"
1551 
1552 _string_nl:
1553 	.string "\r\n\0"
1554 
1555 
1556 /* ========================================[ page aligned structures ]=== */
1557 
1558 /*
1559  * .data section should be page aligned
1560  *	(look into arch/openrisc/kernel/vmlinux.lds.S)
1561  */
1562 	.section .data,"aw"
1563 	.align	8192
1564 	.global  empty_zero_page
1565 empty_zero_page:
1566 	.space  8192
1567 
1568 	.global  swapper_pg_dir
1569 swapper_pg_dir:
1570 	.space  8192
1571 
1572 	.global	_unhandled_stack
1573 _unhandled_stack:
1574 	.space	8192
1575 _unhandled_stack_top:
1576 
1577 /* ============================================================[ EOF ]=== */
1578