xref: /openbmc/linux/arch/arc/include/asm/entry.h (revision 80ecbd24)
1 /*
2  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
9  *  Stack switching code can no longer reliably rely on the fact that
10  *  if we are NOT in user mode, stack is switched to kernel mode.
11  *  e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
 *  its prologue including stack switching from user mode
13  *
14  * Vineetg: Aug 28th 2008: Bug #94984
15  *  -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
16  *   Normally CPU does this automatically, however when doing FAKE rtie,
17  *   we also need to explicitly do this. The problem in macros
18  *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
 *   was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
20  *
21  * Vineetg: May 5th 2008
22  *  -Modified CALLEE_REG save/restore macros to handle the fact that
23  *      r25 contains the kernel current task ptr
24  *  - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
25  *  - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
 *      address Write back load ld.ab instead of separate ld/add instn
27  *
28  * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
29  */
30 
31 #ifndef __ASM_ARC_ENTRY_H
32 #define __ASM_ARC_ENTRY_H
33 
34 #ifdef __ASSEMBLY__
#include <asm/unistd.h>		/* For NR_syscalls definition */
36 #include <asm/asm-offsets.h>
37 #include <asm/arcregs.h>
38 #include <asm/ptrace.h>
39 #include <asm/processor.h>	/* For VMALLOC_START */
40 #include <asm/thread_info.h>	/* For THREAD_SIZE */
41 #include <asm/mmu.h>
42 
43 /* Note on the LD/ST addr modes with addr reg wback
44  *
45  * LD.a same as LD.aw
46  *
47  * LD.a    reg1, [reg2, x]  => Pre Incr
48  *      Eff Addr for load = [reg2 + x]
49  *
50  * LD.ab   reg1, [reg2, x]  => Post Incr
51  *      Eff Addr for load = [reg2]
52  */
53 
/*
 * PUSH: push one core reg onto the current stack.
 * st.a is a pre-decrement store: sp -= 4, then *sp = \reg
 */
.macro PUSH reg
	st.a	\reg, [sp, -4]
.endm
57 
/*
 * PUSHAX: read an auxiliary register and push its value on the stack.
 * Clobbers r9 (the designated scratch reg throughout this file).
 */
.macro PUSHAX aux
	lr	r9, [\aux]
	PUSH	r9
.endm
62 
/*
 * POP: pop one word off the stack into a core reg.
 * ld.ab is a post-increment load: \reg = *sp, then sp += 4
 */
.macro POP reg
	ld.ab	\reg, [sp, 4]
.endm
66 
/*
 * POPAX: pop a word off the stack and write it to an auxiliary register.
 * Clobbers r9.
 */
.macro POPAX aux
	POP	r9
	sr	r9, [\aux]
.endm
71 
72 /*--------------------------------------------------------------
73  * Helpers to save/restore Scratch Regs:
74  * used by Interrupt/Exception Prologue/Epilogue
75  *-------------------------------------------------------------*/
/*
 * Push scratch regs r0-r12; r0 pushed first so r12 ends up at the
 * lowest address (layout consumed by RESTORE_R12_TO_R0)
 */
.macro  SAVE_R0_TO_R12
	PUSH	r0
	PUSH	r1
	PUSH	r2
	PUSH	r3
	PUSH	r4
	PUSH	r5
	PUSH	r6
	PUSH	r7
	PUSH	r8
	PUSH	r9
	PUSH	r10
	PUSH	r11
	PUSH	r12
.endm
91 
/* Pop scratch regs in exact reverse order of SAVE_R0_TO_R12 */
.macro RESTORE_R12_TO_R0
	POP	r12
	POP	r11
	POP	r10
	POP	r9
	POP	r8
	POP	r7
	POP	r6
	POP	r5
	POP	r4
	POP	r3
	POP	r2
	POP	r1
	POP	r0

#ifdef CONFIG_ARC_CURR_IN_REG
	; r25 was redirected to cache "current" at entry; restore the value
	; it held at entry time, which SWITCH_TO_KERNEL_STK stashed in the
	; user_r25 slot (12 bytes above post-pop SP in this frame layout)
	ld	r25, [sp, 12]
#endif
.endm
111 
112 /*--------------------------------------------------------------
113  * Helpers to save/restore callee-saved regs:
114  * used by several macros below
115  *-------------------------------------------------------------*/
/*
 * Push callee-saved regs r13-r24 (r25 is handled separately by the
 * SAVE_CALLEE_SAVED_* wrappers because it may cache "current")
 */
.macro SAVE_R13_TO_R24
	PUSH	r13
	PUSH	r14
	PUSH	r15
	PUSH	r16
	PUSH	r17
	PUSH	r18
	PUSH	r19
	PUSH	r20
	PUSH	r21
	PUSH	r22
	PUSH	r23
	PUSH	r24
.endm
130 
/* Pop callee-saved regs in exact reverse order of SAVE_R13_TO_R24 */
.macro RESTORE_R24_TO_R13
	POP	r24
	POP	r23
	POP	r22
	POP	r21
	POP	r20
	POP	r19
	POP	r18
	POP	r17
	POP	r16
	POP	r15
	POP	r14
	POP	r13
.endm
145 
/*
 * Scaled (word) offset, for ld.as/st.as, from SP after the callee regs
 * have been pushed (r24 slot at SP) up to the user_r25 slot at the top
 * of the kernel-mode stack frame.
 * Fully parenthesized so the expansion stays intact in any expression
 * context the assembler substitutes it into.
 */
#define OFF_USER_R25_FROM_R24	((SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4)
147 
148 /*--------------------------------------------------------------
149  * Collect User Mode callee regs as struct callee_regs - needed by
150  * fork/do_signal/unaligned-access-emulation.
151  * (By default only scratch regs are saved on entry to kernel)
152  *
153  * Special handling for r25 if used for caching Task Pointer.
154  * It would have been saved in task->thread.user_r25 already, but to keep
155  * the interface same it is copied into regular r25 placeholder in
156  * struct callee_regs.
157  *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_USER

	SAVE_R13_TO_R24

#ifdef CONFIG_ARC_CURR_IN_REG
	; r25 currently caches "current"; the user's real r25 was stashed
	; in the user_r25 slot at kernel entry. Copy it from there into the
	; r25 slot of the callee_regs being built (ld.as scales offset by 4)
	ld.as   r12, [sp, OFF_USER_R25_FROM_R24]
	st.a    r12, [sp, -4]
#else
	PUSH	r25
#endif

.endm
171 
172 /*--------------------------------------------------------------
 * Save kernel Mode callee regs at the time of Context Switch.
174  *
175  * Special handling for r25 if used for caching Task Pointer.
176  * Kernel simply skips saving it since it will be loaded with
177  * incoming task pointer anyways
178  *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_KERNEL

	SAVE_R13_TO_R24

#ifdef CONFIG_ARC_CURR_IN_REG
	; r25 caches "current" which the incoming task will reload anyway,
	; so don't save it - just reserve its slot to keep the layout uniform
	sub     sp, sp, 4
#else
	PUSH	r25
#endif
.endm
189 
190 /*--------------------------------------------------------------
191  * Opposite of SAVE_CALLEE_SAVED_KERNEL
192  *-------------------------------------------------------------*/
/* Exact inverse of SAVE_CALLEE_SAVED_KERNEL */
.macro RESTORE_CALLEE_SAVED_KERNEL

#ifdef CONFIG_ARC_CURR_IN_REG
	add     sp, sp, 4  /* skip usual r25 placeholder */
#else
	POP	r25
#endif
	RESTORE_R24_TO_R13
.endm
202 
203 /*--------------------------------------------------------------
204  * Opposite of SAVE_CALLEE_SAVED_USER
205  *
206  * ptrace tracer or unaligned-access fixup might have changed a user mode
207  * callee reg which is saved back to usual r25 storage location
208  *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_USER

#ifdef CONFIG_ARC_CURR_IN_REG
	; pop the (possibly tracer/fixup-modified) user r25 value from the
	; callee_regs slot and write it back to the user_r25 slot, so the
	; regular return path restores it (st.as scales the offset by 4)
	ld.ab   r12, [sp, 4]
	st.as   r12, [sp, OFF_USER_R25_FROM_R24]
#else
	POP	r25
#endif
	RESTORE_R24_TO_R13
.endm
219 
220 /*--------------------------------------------------------------
221  * Super FAST Restore callee saved regs by simply re-adjusting SP
222  *-------------------------------------------------------------*/
.macro DISCARD_CALLEE_SAVED_USER
	; nothing was modified - just drop the whole callee_regs area
	add     sp, sp, SZ_CALLEE_REGS
.endm
226 
227 /*-------------------------------------------------------------
 * given a tsk struct, get to the base of its kernel mode stack
 * tsk->thread_info is really a PAGE, whose bottom hosts the stack
230  * which grows upwards towards thread_info
231  *------------------------------------------------------------*/
232 
.macro GET_TSK_STACK_BASE tsk, out

	/* Get task->thread_info (this is essentially start of a PAGE) */
	ld  \out, [\tsk, TASK_THREAD_INFO]

	/* Go to end of page where stack begins (grows upwards);
	 * add2 scales its operand by 4, hence THREAD_SIZE/4 */
	add2 \out, \out, (THREAD_SIZE)/4

.endm
242 
243 /*--------------------------------------------------------------
244  * Switch to Kernel Mode stack if SP points to User Mode stack
245  *
246  * Entry   : r9 contains pre-IRQ/exception/trap status32
247  * Exit    : SP is set to kernel mode stack pointer
248  *           If CURR_IN_REG, r25 set to "current" task pointer
249  * Clobbers: r9
250  *-------------------------------------------------------------*/
251 
.macro SWITCH_TO_KERNEL_STK

	/* User Mode when this happened ? Yes: Proceed to switch stack */
	bbit1   r9, STATUS_U_BIT, 88f

	/* OK we were already in kernel mode when this event happened, thus can
	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
	 */

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
	/* However....
	 * If Level 2 Interrupts enabled, we may end up with a corner case:
	 * 1. User Task executing
	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
	 * 3. But before it could switch SP from USER to KERNEL stack
	 *      a L2 IRQ "Interrupts" L1
	 * That way although L2 IRQ happened in Kernel mode, stack is still
	 * not switched.
	 * To handle this, we may need to switch stack even if in kernel mode
	 * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
	 */
	brlo sp, VMALLOC_START, 88f

	/* TODO: vineetg:
	 * We need to be a bit more cautious here. What if a kernel bug in
	 * L1 ISR, caused SP to go whacko (some small value which looks like
	 * USER stk) and then we take L2 ISR.
	 * Above brlo alone would treat it as a valid L1-L2 scenario
	 * instead of shouting aloud
	 * The only feasible way is to make sure this L2 happened in
	 * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
	 * L1 ISR before it switches stack
	 */

#endif

	/* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
	 * safe-keeping not really needed, but it keeps the epilogue code
	 * (SP restore) simpler/uniform.
	 * b.d: the mov below executes in the branch delay slot.
	 */
	b.d	66f
	mov	r9, sp

88: /*------Intr/Excp happened in user mode, "switch" stack ------ */

	GET_CURR_TASK_ON_CPU   r9

	/* With current tsk in r9, get its kernel mode stack base */
	GET_TSK_STACK_BASE  r9, r9

66:
#ifdef CONFIG_ARC_CURR_IN_REG
	/*
	 * Treat r25 as scratch reg, save it on stack first
	 * Load it with current task pointer
	 */
	st	r25, [r9, -4]
	GET_CURR_TASK_ON_CPU   r25
#endif

	/* Save Pre Intr/Exception User SP on kernel stack */
	st.a    sp, [r9, -16]	; Make room for orig_r0, ECR, user_r25

	/* CAUTION:
	 * SP should be set at the very end when we are done with everything
	 * In case of 2 levels of interrupt we depend on value of SP to assume
	 * that everything else is done (loading r25 etc)
	 */

	/* set SP to point to kernel mode stack */
	mov sp, r9

	/* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */

.endm
327 
328 /*------------------------------------------------------------
329  * "FAKE" a rtie to return from CPU Exception context
330  * This is to re-enable Exceptions within exception
331  * Look at EV_ProtV to see how this is actually used
332  *-------------------------------------------------------------*/
333 
.macro FAKE_RET_FROM_EXCPN  reg

	/* Craft a return status: saved status32 with U (user mode) and
	 * DE (delay slot) cleared, L bit set - per the changelog at the
	 * top of this file, SETTING L is what clears stale ZOL context
	 */
	ld  \reg, [sp, PT_status32]
	bic  \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
	bset \reg, \reg, STATUS_L_BIT
	sr  \reg, [erstatus]
	/* "return" to the instruction right after the rtie (label 55) */
	mov \reg, 55f
	sr  \reg, [eret]

	rtie
55:
.endm
346 
347 /*
348  * @reg [OUT] &thread_info of "current"
349  */
.macro GET_CURR_THR_INFO_FROM_SP  reg
	; kernel stack is THREAD_SIZE aligned; masking SP yields its base,
	; which is where thread_info lives
	bic \reg, sp, (THREAD_SIZE - 1)
.endm
353 
354 /*
355  * @reg [OUT] thread_info->flags of "current"
356  */
.macro GET_CURR_THR_INFO_FLAGS  reg
	GET_CURR_THR_INFO_FROM_SP  \reg
	; \reg = current thread_info->flags (TIF_* work-pending bits)
	ld  \reg, [\reg, THREAD_INFO_FLAGS]
.endm
361 
362 /*--------------------------------------------------------------
363  * For early Exception Prologue, a core reg is temporarily needed to
364  * code the rest of prolog (stack switching). This is done by stashing
365  * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
366  *
367  * Before saving the full regfile - this reg is restored back, only
368  * to be saved again on kernel mode stack, as part of ptregs.
369  *-------------------------------------------------------------*/
.macro EXCPN_PROLOG_FREEUP_REG	reg
#ifdef CONFIG_SMP
	/* per-cpu aux scratch reg: safe vs. other cores */
	sr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
	/* single global memory slot suffices on UP */
	st  \reg, [@ex_saved_reg1]
#endif
.endm
377 
/* Inverse of EXCPN_PROLOG_FREEUP_REG: bring the stashed value back */
.macro EXCPN_PROLOG_RESTORE_REG	reg
#ifdef CONFIG_SMP
	lr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
	ld  \reg, [@ex_saved_reg1]
#endif
.endm
385 
386 /*--------------------------------------------------------------
387  * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
388  * Requires SP to be already switched to kernel mode Stack
389  * sp points to the next free element on the stack at exit of this macro.
390  * Registers are pushed / popped in the order defined in struct ptregs
391  * in asm/ptrace.h
392  * Note that syscalls are implemented via TRAP which is also a exception
393  * from CPU's point of view
394  *-------------------------------------------------------------*/
.macro SAVE_ALL_SYS

	/* SWITCH_TO_KERNEL_STK already stored old SP and made room for
	 * orig_r0/ECR/user_r25; fill the ECR and orig_r0 slots now */
	lr	r9, [ecr]
	st      r9, [sp, 8]    /* ECR */
	st      r0, [sp, 4]    /* orig_r0, needed only for sys calls */

	/* Restore r9 used to code the early prologue */
	EXCPN_PROLOG_RESTORE_REG  r9

	/* push the remainder of pt_regs: scratch regs then aux state */
	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSHAX	eret
	PUSHAX	erstatus
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	erbta
.endm
415 
416 /*--------------------------------------------------------------
417  * Restore all registers used by system call or Exceptions
418  * SP should always be pointing to the next free stack element
419  * when entering this macro.
420  *
421  * NOTE:
422  *
423  * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
425  * by hardware and that is not good.
426  *-------------------------------------------------------------*/
/* Exact inverse of SAVE_ALL_SYS; ends by reloading the pre-entry SP */
.macro RESTORE_ALL_SYS
	POPAX	erbta
	POPAX	lp_start
	POPAX	lp_end

	POP	r9
	mov	lp_count, r9	;LD to lp_count is not allowed

	POPAX	erstatus
	POPAX	eret
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld  sp, [sp] /* restore original sp */
	/* orig_r0, ECR, user_r25 skipped automatically */
.endm
445 
446 
447 /*--------------------------------------------------------------
448  * Save all registers used by interrupt handlers.
449  *-------------------------------------------------------------*/
.macro SAVE_ALL_INT1

	/* restore original r9 to be saved as part of reg-file */
#ifdef CONFIG_SMP
	lr  r9, [ARC_REG_SCRATCH_DATA0]
#else
	ld  r9, [@int1_saved_reg]
#endif

	/* now we are ready to save the remaining context :) */
	st      event_IRQ1, [sp, 8]    /* Dummy ECR */
	st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */

	/* same pt_regs layout as SAVE_ALL_SYS, but IRQ level-1 banked
	 * regs (ilink1/status32_l1/bta_l1) in place of exception regs */
	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSH	ilink1
	PUSHAX	status32_l1
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	bta_l1
.endm
474 
.macro SAVE_ALL_INT2

	/* TODO-vineetg: SMP we can't use global nor can we use
	*   SCRATCH0 as we do for int1 because while int1 is using
	*   it, int2 can come
	*/
	/* restore original r9, saved in int2_saved_reg */
	ld  r9, [@int2_saved_reg]

	/* now we are ready to save the remaining context :) */
	st      event_IRQ2, [sp, 8]    /* Dummy ECR */
	st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */

	/* like SAVE_ALL_INT1 but with the IRQ level-2 banked regs */
	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSH	ilink2
	PUSHAX	status32_l2
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	bta_l2
.endm
499 
500 /*--------------------------------------------------------------
501  * Restore all registers used by interrupt handlers.
502  *
503  * NOTE:
504  *
505  * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
507  * by hardware and that is not good.
508  *-------------------------------------------------------------*/
509 
/* Exact inverse of SAVE_ALL_INT1; ends by reloading the pre-IRQ SP */
.macro RESTORE_ALL_INT1
	POPAX	bta_l1
	POPAX	lp_start
	POPAX	lp_end

	POP	r9
	mov	lp_count, r9	;LD to lp_count is not allowed

	POPAX	status32_l1
	POP	ilink1
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld  sp, [sp] /* restore original sp */
	/* orig_r0, ECR, user_r25 skipped automatically */
.endm
528 
/* Exact inverse of SAVE_ALL_INT2; ends by reloading the pre-IRQ SP */
.macro RESTORE_ALL_INT2
	POPAX	bta_l2
	POPAX	lp_start
	POPAX	lp_end

	POP	r9
	mov	lp_count, r9	;LD to lp_count is not allowed

	POPAX	status32_l2
	POP	ilink2
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld  sp, [sp] /* restore original sp */
	/* orig_r0, ECR, user_r25 skipped automatically */
.endm
547 
548 
549 /* Get CPU-ID of this core */
/* Get CPU-ID of this core: extract bits [15:8] of the IDENTITY aux reg */
.macro  GET_CPU_ID  reg
	lr  \reg, [identity]
	lsr \reg, \reg, 8	; shift CPU-ID field down
	bmsk \reg, \reg, 7	; keep low 8 bits only
.endm
555 
556 #ifdef CONFIG_SMP
557 
558 /*-------------------------------------------------
559  * Retrieve the current running task on this CPU
560  * 1. Determine curr CPU id.
561  * 2. Use it to index into _current_task[ ]
562  */
.macro  GET_CURR_TASK_ON_CPU   reg
	GET_CPU_ID  \reg
	; scaled-index load (ld.as: offset * 4) into per-cpu task ptr array
	ld.as  \reg, [@_current_task, \reg]
.endm
567 
568 /*-------------------------------------------------
569  * Save a new task as the "current" task on this CPU
570  * 1. Determine curr CPU id.
571  * 2. Use it to index into _current_task[ ]
572  *
573  * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
574  * because ST r0, [r1, offset] can ONLY have s9 @offset
575  * while   LD can take s9 (4 byte insn) or LIMM (8 byte insn)
576  */
577 
.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
	GET_CPU_ID  \tmp
	; ST only takes an s9 offset (no .as scaled form), so compute the
	; slot address explicitly: add2 scales the index by 4
	add2 \tmp, @_current_task, \tmp
	st   \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
	; keep the r25 cache of "current" coherent with the array
	mov r25, \tsk
#endif

.endm
587 
588 
589 #else   /* Uniprocessor implementation of macros */
590 
/* UP variant: single global "current" pointer, no CPU indexing needed */
.macro  GET_CURR_TASK_ON_CPU    reg
	ld  \reg, [@_current_task]
.endm
594 
/* UP variant: \tmp unused, kept only for interface parity with SMP */
.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
	st  \tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
	; keep the r25 cache of "current" coherent
	mov r25, \tsk
#endif
.endm
601 
602 #endif /* SMP / UNI */
603 
604 /* ------------------------------------------------------------------
605  * Get the ptr to some field of Current Task at @off in task struct
606  *  -Uses r25 for Current task ptr if that is enabled
607  */
608 
609 #ifdef CONFIG_ARC_CURR_IN_REG
610 
.macro GET_CURR_TASK_FIELD_PTR  off,  reg
	; r25 already caches the "current" task pointer
	add \reg, r25, \off
.endm
614 
615 #else
616 
.macro GET_CURR_TASK_FIELD_PTR  off,  reg
	; no cached r25: fetch "current" first, then add the field offset
	GET_CURR_TASK_ON_CPU  \reg
	add \reg, \reg, \off
.endm
621 
622 #endif	/* CONFIG_ARC_CURR_IN_REG */
623 
624 #endif  /* __ASSEMBLY__ */
625 
626 #endif  /* __ASM_ARC_ENTRY_H */
627