xref: /openbmc/linux/arch/arc/include/asm/entry.h (revision c4c11dd1)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
 *  Stack switching code can no longer assume that if we are NOT in user
 *  mode, the stack has already been switched to kernel mode.
 *  e.g. an L2 IRQ interrupted an L1 ISR which had not yet completed
 *  its prologue, including the stack switch from user mode
 *
 * Vineetg: Aug 28th 2008: Bug #94984
 *  -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
 *   Normally the CPU does this automatically, however when doing a FAKE rtie,
 *   we also need to do it explicitly. The problem in macros
 *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
 *   was being "CLEARED" rather than "SET". Actually "SET" clears the ZOL context
 *
 * Vineetg: May 5th 2008
 *  -Modified CALLEE_REG save/restore macros to handle the fact that
 *      r25 contains the kernel current task ptr
 *  - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
 *  - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
 *      address write-back load ld.ab instead of separate ld/add instructions
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H

#ifdef __ASSEMBLY__
#include <asm/unistd.h>		/* For NR_syscalls definition */
#include <asm/asm-offsets.h>
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h>	/* For VMALLOC_START */
#include <asm/thread_info.h>	/* For THREAD_SIZE */

/* Note on the LD/ST addr modes with addr reg wback
 *
 * LD.a same as LD.aw
 *
 * LD.a    reg1, [reg2, x]  => Pre Incr
 *      Eff Addr for load = [reg2 + x]
 *
 * LD.ab   reg1, [reg2, x]  => Post Incr
 *      Eff Addr for load = [reg2]
 */
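
/*
 * Worked example (illustrative): assuming r2 = 0x1000 and x = -4,
 *
 *	ld.a	r1, [r2, -4]	; r2 becomes 0x0FFC first, then r1 = [0x0FFC]
 *	ld.ab	r1, [r2, -4]	; r1 = [0x1000] first, then r2 becomes 0x0FFC
 *
 * Both update r2; only the effective address of the access differs. This is
 * why PUSH below uses the pre-decrement store (st.a) and POP the
 * post-increment load (ld.ab).
 */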

.macro PUSH reg
	st.a	\reg, [sp, -4]
.endm

.macro PUSHAX aux
	lr	r9, [\aux]
	PUSH	r9
.endm

.macro POP reg
	ld.ab	\reg, [sp, 4]
.endm

.macro POPAX aux
	POP	r9
	sr	r9, [\aux]
.endm
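
/*
 * Usage sketch (illustrative): PUSH/POP implement a full-descending stack
 * (pre-decrement store, post-increment load). PUSHAX/POPAX do the same for
 * auxiliary registers, bouncing through r9 since lr/sr are the instructions
 * that move between aux and core register space - which is why r9 is treated
 * as a scratch reg by the prologue code below.
 *
 *	PUSHAX	erstatus	; sp -= 4; [sp] = ERSTATUS	(via r9)
 *	POPAX	erstatus	; ERSTATUS = [sp]; sp += 4	(via r9)
 */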

/*--------------------------------------------------------------
 * Helpers to save/restore Scratch Regs:
 * used by Interrupt/Exception Prologue/Epilogue
 *-------------------------------------------------------------*/
.macro  SAVE_R0_TO_R12
	PUSH	r0
	PUSH	r1
	PUSH	r2
	PUSH	r3
	PUSH	r4
	PUSH	r5
	PUSH	r6
	PUSH	r7
	PUSH	r8
	PUSH	r9
	PUSH	r10
	PUSH	r11
	PUSH	r12
.endm

.macro RESTORE_R12_TO_R0
	POP	r12
	POP	r11
	POP	r10
	POP	r9
	POP	r8
	POP	r7
	POP	r6
	POP	r5
	POP	r4
	POP	r3
	POP	r2
	POP	r1
	POP	r0

#ifdef CONFIG_ARC_CURR_IN_REG
	ld	r25, [sp, 12]	/* restore r25 saved at entry (user_r25 slot of pt_regs) */
#endif
.endm

/*--------------------------------------------------------------
 * Helpers to save/restore callee-saved regs:
 * used by several macros below
 *-------------------------------------------------------------*/
.macro SAVE_R13_TO_R24
	PUSH	r13
	PUSH	r14
	PUSH	r15
	PUSH	r16
	PUSH	r17
	PUSH	r18
	PUSH	r19
	PUSH	r20
	PUSH	r21
	PUSH	r22
	PUSH	r23
	PUSH	r24
.endm

.macro RESTORE_R24_TO_R13
	POP	r24
	POP	r23
	POP	r22
	POP	r21
	POP	r20
	POP	r19
	POP	r18
	POP	r17
	POP	r16
	POP	r15
	POP	r14
	POP	r13
.endm

#define OFF_USER_R25_FROM_R24	(SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4
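
/*
 * Sketch of where that constant comes from (assuming the layouts implied by
 * the save sequences in this file): after SAVE_R13_TO_R24 has pushed its 12
 * words, SP sits one word short of a full struct callee_regs below the bottom
 * of pt_regs, and user_r25 is the topmost word of pt_regs (see
 * SWITCH_TO_KERNEL_STK). The byte distance from SP to that slot is thus
 * (SZ_CALLEE_REGS - 4) + (SZ_PT_REGS - 4); the /4 converts it to the word
 * index expected by the scaled ld.as/st.as accesses below.
 */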

/*--------------------------------------------------------------
 * Collect User Mode callee regs as struct callee_regs - needed by
 * fork/do_signal/unaligned-access-emulation.
 * (By default only scratch regs are saved on entry to kernel)
 *
 * Special handling for r25 if used for caching Task Pointer.
 * It would have been saved in task->thread.user_r25 already, but to keep
 * the interface the same it is copied into the regular r25 placeholder in
 * struct callee_regs.
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_USER

	SAVE_R13_TO_R24

#ifdef CONFIG_ARC_CURR_IN_REG
	; Retrieve orig r25 and save it on stack
	ld.as   r12, [sp, OFF_USER_R25_FROM_R24]
	st.a    r12, [sp, -4]
#else
	PUSH	r25
#endif

.endm

/*--------------------------------------------------------------
 * Save kernel Mode callee regs at the time of Context Switch.
 *
 * Special handling for r25 if used for caching Task Pointer.
 * Kernel simply skips saving it since it will be loaded with the
 * incoming task pointer anyway
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_KERNEL

	SAVE_R13_TO_R24

#ifdef CONFIG_ARC_CURR_IN_REG
	sub     sp, sp, 4	/* reserve the usual r25 placeholder (not saved) */
#else
	PUSH	r25
#endif
.endm

/*--------------------------------------------------------------
 * Opposite of SAVE_CALLEE_SAVED_KERNEL
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL

#ifdef CONFIG_ARC_CURR_IN_REG
	add     sp, sp, 4  /* skip usual r25 placeholder */
#else
	POP	r25
#endif
	RESTORE_R24_TO_R13
.endm

/*--------------------------------------------------------------
 * Opposite of SAVE_CALLEE_SAVED_USER
 *
 * A ptrace tracer or the unaligned-access fixup might have changed a user
 * mode callee reg, so the r25 value is written back to its usual storage
 * location rather than simply discarded
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_USER

#ifdef CONFIG_ARC_CURR_IN_REG
	ld.ab   r12, [sp, 4]
	st.as   r12, [sp, OFF_USER_R25_FROM_R24]
#else
	POP	r25
#endif
	RESTORE_R24_TO_R13
.endm

/*--------------------------------------------------------------
 * Super FAST Restore callee saved regs by simply re-adjusting SP
 *-------------------------------------------------------------*/
.macro DISCARD_CALLEE_SAVED_USER
	add     sp, sp, SZ_CALLEE_REGS
.endm

/*-------------------------------------------------------------
 * given a tsk struct, get to the base of its kernel mode stack
 * tsk->thread_info is really a PAGE, whose bottom holds thread_info;
 * the stack starts at the other end of the page and grows down towards it
 *------------------------------------------------------------*/

.macro GET_TSK_STACK_BASE tsk, out

	/* Get task->thread_info (this is essentially start of a PAGE) */
	ld  \out, [\tsk, TASK_THREAD_INFO]

	/* Go to end of page where stack begins (grows downwards from there) */
	add2 \out, \out, (THREAD_SIZE)/4
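
	/*
	 * add2 shifts its last operand left by 2 before adding, so the
	 * (THREAD_SIZE)/4 above scales back to THREAD_SIZE bytes:
	 * \out = thread_info + THREAD_SIZE, i.e. the top of the kernel stack.
	 */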

.endm

/*--------------------------------------------------------------
 * Switch to Kernel Mode stack if SP points to User Mode stack
 *
 * Entry   : r9 contains pre-IRQ/exception/trap status32
 * Exit    : SP is set to kernel mode stack pointer
 *           If CURR_IN_REG, r25 set to "current" task pointer
 * Clobbers: r9
 *-------------------------------------------------------------*/

.macro SWITCH_TO_KERNEL_STK

	/* User Mode when this happened? Yes: proceed to switch stack */
	bbit1   r9, STATUS_U_BIT, 88f

	/* OK we were already in kernel mode when this event happened, thus can
	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
	 */

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
	/* However....
	 * If Level 2 Interrupts are enabled, we may end up with a corner case:
	 * 1. User Task executing
	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
	 * 3. But before it could switch SP from USER to KERNEL stack
	 *      an L2 IRQ "Interrupts" L1
	 * That way although the L2 IRQ happened in Kernel mode, the stack is
	 * still not switched.
	 * To handle this, we may need to switch stack even if in kernel mode
	 * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
	 */
	brlo sp, VMALLOC_START, 88f

	/* TODO: vineetg:
	 * We need to be a bit more cautious here. What if a kernel bug in
	 * the L1 ISR caused SP to go whacko (some small value which looks like
	 * USER stk) and then we take an L2 ISR.
	 * The brlo above alone would treat it as a valid L1-L2 scenario
	 * instead of shouting out loud.
	 * The only feasible way is to make sure this L2 happened in the
	 * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in the
	 * L1 ISR before it switches stack
	 */

#endif

	/* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
	 * safe-keeping not really needed, but it keeps the epilogue code
	 * (SP restore) simpler/uniform.
	 */
	b.d	66f
	mov	r9, sp

88: /*------ Intr/Excp happened in user mode, "switch" stack ------ */

	GET_CURR_TASK_ON_CPU   r9

	/* With current tsk in r9, get its kernel mode stack base */
	GET_TSK_STACK_BASE  r9, r9

66:
#ifdef CONFIG_ARC_CURR_IN_REG
	/*
	 * Treat r25 as scratch reg, save it on stack first
	 * Load it with current task pointer
	 */
	st	r25, [r9, -4]
	GET_CURR_TASK_ON_CPU   r25
#endif

	/* Save Pre Intr/Exception User SP on kernel stack */
	st.a    sp, [r9, -16]	; Make room for orig_r0, ECR, user_r25
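
	/*
	 * Illustrative view of what the sequence above set up, with "r9_in"
	 * denoting r9 before the st.a (kernel stack top for user-mode entry,
	 * pre-exception kernel SP otherwise):
	 *	[r9_in -  4]  original r25 (stored above, if CURR_IN_REG)
	 *	[r9_in -  8]  ECR slot      (filled later by SAVE_ALL_*)
	 *	[r9_in - 12]  orig_r0 slot  (filled later by SAVE_ALL_*)
	 *	[r9_in - 16]  pre Intr/Excp SP (just stored by the st.a)
	 * r9 now points at that last slot and becomes the new SP below.
	 */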

	/* CAUTION:
	 * SP should be set at the very end when we are done with everything
	 * In case of 2 levels of interrupt we depend on value of SP to assume
	 * that everything else is done (loading r25 etc)
	 */

	/* set SP to point to kernel mode stack */
	mov sp, r9

	/* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */

.endm

/*------------------------------------------------------------
 * "FAKE" a rtie to return from CPU Exception context
 * This is to re-enable Exceptions within an exception handler
 * Look at EV_ProtV to see how this is actually used
 *-------------------------------------------------------------*/

.macro FAKE_RET_FROM_EXCPN  reg

	ld  \reg, [sp, PT_status32]
	bic  \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
	bset \reg, \reg, STATUS_L_BIT
	sr  \reg, [erstatus]
	mov \reg, 55f
	sr  \reg, [eret]

	rtie
55:
.endm
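
/*
 * How the fake return works: the saved status32 from pt_regs is massaged -
 * U cleared so we stay in kernel mode, DE cleared so no delay-slot state is
 * pending, L set so no Zero Overhead Loop context survives - then written to
 * ERSTATUS, ERET is pointed at the local label 55 just past the RTIE, and
 * the RTIE "returns" there, dropping the exception-active state so further
 * exceptions can be taken.
 */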

/*
 * @reg [OUT] &thread_info of "current"
 */
.macro GET_CURR_THR_INFO_FROM_SP  reg
	bic \reg, sp, (THREAD_SIZE - 1)
.endm
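
/*
 * Worked example (illustrative, assuming THREAD_SIZE = 8K): with
 * sp = 0x8f45_6a30, bic clears the low 13 bits giving 0x8f45_6000, the
 * start of the THREAD_SIZE-aligned region that holds thread_info. This
 * relies on kernel stacks being THREAD_SIZE aligned.
 */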

/*
 * @reg [OUT] thread_info->flags of "current"
 */
.macro GET_CURR_THR_INFO_FLAGS  reg
	GET_CURR_THR_INFO_FROM_SP  \reg
	ld  \reg, [\reg, THREAD_INFO_FLAGS]
.endm

/*--------------------------------------------------------------
 * For early Exception Prologue, a core reg is temporarily needed to
 * code the rest of the prologue (stack switching). This is done by stashing
 * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
 *
 * Before saving the full regfile - this reg is restored back, only
 * to be saved again on kernel mode stack, as part of pt_regs.
 *-------------------------------------------------------------*/
.macro EXCPN_PROLOG_FREEUP_REG	reg
#ifdef CONFIG_SMP
	sr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
	st  \reg, [@ex_saved_reg1]
#endif
.endm

.macro EXCPN_PROLOG_RESTORE_REG	reg
#ifdef CONFIG_SMP
	lr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
	ld  \reg, [@ex_saved_reg1]
#endif
.endm

/*--------------------------------------------------------------
 * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
 * Requires SP to be already switched to kernel mode Stack
 * sp points to the next free element on the stack at exit of this macro.
 * Registers are pushed / popped in the order defined in struct pt_regs
 * in asm/ptrace.h
 * Note that syscalls are implemented via TRAP which is also an exception
 * from the CPU's point of view
 *-------------------------------------------------------------*/
.macro SAVE_ALL_SYS

	lr	r9, [ecr]
	st      r9, [sp, 8]    /* ECR */
	st      r0, [sp, 4]    /* orig_r0, needed only for sys calls */

	/* Restore r9 used to code the early prologue */
	EXCPN_PROLOG_RESTORE_REG  r9

	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSHAX	eret
	PUSHAX	erstatus
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	erbta
.endm
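
/*
 * Sketch of the resulting pt_regs layout (ascending addresses, following the
 * push order above and the slots prepared by SWITCH_TO_KERNEL_STK):
 *	bta, lp_start, lp_end, lp_count, status32, ret, blink, fp, gp,
 *	r12 ... r0, sp (pre-exception), orig_r0, ECR, user_r25
 * with SP left pointing at the lowest slot (bta) when the macro exits.
 */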

/*--------------------------------------------------------------
 * Restore all registers used by system call or Exceptions
 * SP should always be pointing to the next free stack element
 * when entering this macro.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a destination
 * reg for memory load operations; if they are, interrupts are deferred by
 * hardware and that is not good.
 *-------------------------------------------------------------*/
.macro RESTORE_ALL_SYS
	POPAX	erbta
	POPAX	lp_start
	POPAX	lp_end

	POP	r9
	mov	lp_count, r9	;LD to lp_count is not allowed

	POPAX	erstatus
	POPAX	eret
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld  sp, [sp] /* restore original sp */
	/* orig_r0, ECR, user_r25 skipped automatically */
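
	/*
	 * The single ld above suffices because the word SP points at after
	 * RESTORE_R12_TO_R0 is the pre-exception SP saved by
	 * SWITCH_TO_KERNEL_STK, so reloading SP from it both skips the
	 * remaining pt_regs words (orig_r0, ECR, user_r25) and switches back
	 * to the original (user or kernel) stack in one step.
	 */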
.endm


/*--------------------------------------------------------------
 * Save all registers used by interrupt handlers.
 *-------------------------------------------------------------*/
.macro SAVE_ALL_INT1

	/* restore original r9 to be saved as part of reg-file */
#ifdef CONFIG_SMP
	lr  r9, [ARC_REG_SCRATCH_DATA0]
#else
	ld  r9, [@int1_saved_reg]
#endif

	/* now we are ready to save the remaining context :) */
	st      event_IRQ1, [sp, 8]    /* Dummy ECR */
	st      0, [sp, 4]    /* orig_r0, N/A for IRQ */

	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSH	ilink1
	PUSHAX	status32_l1
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	bta_l1
.endm

.macro SAVE_ALL_INT2

	/* TODO-vineetg: in SMP we can't use a global scratch location, nor can
	 *   we use SCRATCH0 as we do for int1, because while int1 is using
	 *   it, an int2 can come in
	 */
	/* restore original r9, saved in int2_saved_reg */
	ld  r9, [@int2_saved_reg]

	/* now we are ready to save the remaining context :) */
	st      event_IRQ2, [sp, 8]    /* Dummy ECR */
	st      0, [sp, 4]    /* orig_r0, N/A for IRQ */

	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSH	ilink2
	PUSHAX	status32_l2
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	bta_l2
.endm

/*--------------------------------------------------------------
 * Restore all registers used by interrupt handlers.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a destination
 * reg for memory load operations; if they are, interrupts are deferred by
 * hardware and that is not good.
 *-------------------------------------------------------------*/

.macro RESTORE_ALL_INT1
	POPAX	bta_l1
	POPAX	lp_start
	POPAX	lp_end

	POP	r9
	mov	lp_count, r9	;LD to lp_count is not allowed

	POPAX	status32_l1
	POP	ilink1
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld  sp, [sp] /* restore original sp */
	/* orig_r0, ECR, user_r25 skipped automatically */
.endm

.macro RESTORE_ALL_INT2
	POPAX	bta_l2
	POPAX	lp_start
	POPAX	lp_end

	POP	r9
	mov	lp_count, r9	;LD to lp_count is not allowed

	POPAX	status32_l2
	POP	ilink2
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld  sp, [sp] /* restore original sp */
	/* orig_r0, ECR, user_r25 skipped automatically */
.endm


/* Get CPU-ID of this core */
.macro  GET_CPU_ID  reg
	lr  \reg, [identity]
	lsr \reg, \reg, 8
	bmsk \reg, \reg, 7
.endm
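
/*
 * Worked example (illustrative): the macro treats bits [15:8] of the
 * IDENTITY aux reg as the core id. With \reg = 0x0000_0243 after the lr,
 * the lsr gives 0x0000_0002 and bmsk keeps only bits [7:0], so the result
 * is CPU id 2.
 */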

#ifdef CONFIG_SMP

/*-------------------------------------------------
 * Retrieve the current running task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 */
.macro  GET_CURR_TASK_ON_CPU   reg
	GET_CPU_ID  \reg
	ld.as  \reg, [@_current_task, \reg]
.endm
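
/*
 * Note: the .as (address-scaled) mode multiplies the index by the access
 * size, so the ld above reads the word at _current_task + (cpu_id * 4),
 * i.e. _current_task[cpu_id], without a separate shift/add.
 */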

/*-------------------------------------------------
 * Save a new task as the "current" task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 *
 * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
 * because ST r0, [r1, offset] can ONLY have s9 @offset
 * while   LD can take s9 (4 byte insn) or LIMM (8 byte insn)
 */

.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
	GET_CPU_ID  \tmp
	add2 \tmp, @_current_task, \tmp
	st   \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
	mov r25, \tsk
#endif

.endm


#else   /* Uniprocessor implementation of macros */

.macro  GET_CURR_TASK_ON_CPU    reg
	ld  \reg, [@_current_task]
.endm

.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
	st  \tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
	mov r25, \tsk
#endif
.endm

#endif /* SMP / UNI */

/* ------------------------------------------------------------------
 * Get the ptr to some field of Current Task at @off in task struct
 *  -Uses r25 for Current task ptr if that is enabled
 */

#ifdef CONFIG_ARC_CURR_IN_REG

.macro GET_CURR_TASK_FIELD_PTR  off,  reg
	add \reg, r25, \off
.endm

#else

.macro GET_CURR_TASK_FIELD_PTR  off,  reg
	GET_CURR_TASK_ON_CPU  \reg
	add \reg, \reg, \off
.endm

#endif	/* CONFIG_ARC_CURR_IN_REG */

#endif  /* __ASSEMBLY__ */

#endif  /* __ASM_ARC_ENTRY_H */