/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

/*
 * For SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

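/*
 * Save the assembler temporary register $1 (at) into the trap frame.
 * ".set noat" is needed because the assembler reserves $1 for its own use.
 */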
		.macro	SAVE_AT
		.set	push
		.set	noat
		LONG_S	$1, PT_R1(sp)
		.set	pop
		.endm

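/*
 * Save the caller-saved temporaries and the multiplier state (hi/lo,
 * plus ACX on SmartMIPS).  $8 and $9 are saved here only on 32-bit
 * kernels; on 64-bit kernels they are argument registers and are saved
 * by SAVE_SOME instead.
 */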
		.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#else
		mfhi	v1
#endif
#ifdef CONFIG_32BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$10, PT_R10(sp)
		LONG_S	$11, PT_R11(sp)
		LONG_S	$12, PT_R12(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		LONG_S	$13, PT_R13(sp)
		LONG_S	$14, PT_R14(sp)
		LONG_S	$15, PT_R15(sp)
		LONG_S	$24, PT_R24(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
		LONG_S	v1, PT_LO(sp)
#endif
		.endm

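/*
 * Save the callee-saved ("static") registers $16-$23 and $30, which
 * SAVE_SOME and SAVE_TEMP leave untouched.
 */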
		.macro	SAVE_STATIC
		LONG_S	$16, PT_R16(sp)
		LONG_S	$17, PT_R17(sp)
		LONG_S	$18, PT_R18(sp)
		LONG_S	$19, PT_R19(sp)
		LONG_S	$20, PT_R20(sp)
		LONG_S	$21, PT_R21(sp)
		LONG_S	$22, PT_R22(sp)
		LONG_S	$23, PT_R23(sp)
		LONG_S	$30, PT_R30(sp)
		.endm

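/*
 * get_saved_sp loads the current CPU's saved kernel stack pointer from
 * kernelsp into k1; set_saved_sp stores \stackp back there.  On SMP the
 * CPU is identified through a CP0 register (TCBind, XContext or Context,
 * depending on the configuration).
 */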
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT	19	/* TCBIND */
#define CPU_ID_REG CP0_TCBIND
#define CPU_ID_MFC0 mfc0
#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
#define PTEBASE_SHIFT	48	/* XCONTEXT */
#define CPU_ID_REG CP0_XCONTEXT
#define CPU_ID_MFC0 MFC0
#else
#define PTEBASE_SHIFT	23	/* CONTEXT */
#define CPU_ID_REG CP0_CONTEXT
#define CPU_ID_MFC0 MFC0
#endif
		.macro	get_saved_sp	/* SMP variation */
		CPU_ID_MFC0	k0, CPU_ID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#endif
		LONG_SRL	k0, PTEBASE_SHIFT
		LONG_ADDU	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		CPU_ID_MFC0	\temp, CPU_ID_REG
		LONG_SRL	\temp, PTEBASE_SHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else
		.macro	get_saved_sp	/* Uniprocessor variation */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#endif
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif

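/*
 * Save what every exception handler needs: test Status.CU0 to see whether
 * we were already running on the kernel stack and switch to it if not,
 * open a struct pt_regs frame and save the old sp, Status, Cause, EPC and
 * the GPRs not covered by SAVE_AT, SAVE_TEMP and SAVE_STATIC.  On exit
 * $28 points at the current thread_info.
 */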
		.macro	SAVE_SOME
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		 move	k1, sp
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:		move	k0, sp
		PTR_SUBU sp, k1, PT_SIZE
#else
		.set	at=k0
8:		PTR_SUBU k1, PT_SIZE
		.set	noat
		move	k0, sp
		move	sp, k1
#endif
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Ideally, these instructions would be shuffled in
		 * to cover the pipeline delay.
		 */
		.set	mips32
		mfc0	v1, CP0_TCSTATUS
		.set	mips0
		LONG_S	v1, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_S	$4, PT_R4(sp)
		LONG_S	$5, PT_R5(sp)
		LONG_S	v1, PT_STATUS(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$6, PT_R6(sp)
		LONG_S	$7, PT_R7(sp)
		LONG_S	v1, PT_CAUSE(sp)
		MFC0	v1, CP0_EPC
#ifdef CONFIG_64BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$25, PT_R25(sp)
		LONG_S	$28, PT_R28(sp)
		LONG_S	$31, PT_R31(sp)
		LONG_S	v1, PT_EPC(sp)
		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set    mips64
		pref    0, 0($28)       /* Prefetch the current pointer */
		pref    0, PT_R31(sp)   /* Prefetch the $31(ra) */
		/*
		 * The Octeon multiplier state is affected by general
		 * multiply instructions. It must be saved before any
		 * kernel code might corrupt it.
		 */
		jal     octeon_mult_save
		LONG_L  v1, 0($28)  /* Load the current pointer */
		/* Restore $31(ra) that was changed by the jal */
		LONG_L  ra, PT_R31(sp)
		pref    0, 0(v1)    /* Prefetch the current thread */
#endif
		.set	pop
		.endm

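/*
 * Save the complete register set.
 */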
		.macro	SAVE_ALL
		SAVE_SOME
		SAVE_AT
		SAVE_TEMP
		SAVE_STATIC
		.endm

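/*
 * Restore the assembler temporary register $1 (at) from the trap frame.
 */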
		.macro	RESTORE_AT
		.set	push
		.set	noat
		LONG_L	$1,  PT_R1(sp)
		.set	pop
		.endm

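/*
 * Restore the multiplier state and the caller-saved temporaries that
 * SAVE_TEMP saved.
 */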
		.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#else
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$10, PT_R10(sp)
		LONG_L	$11, PT_R11(sp)
		LONG_L	$12, PT_R12(sp)
		LONG_L	$13, PT_R13(sp)
		LONG_L	$14, PT_R14(sp)
		LONG_L	$15, PT_R15(sp)
		LONG_L	$24, PT_R24(sp)
		.endm

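/*
 * Restore the callee-saved registers $16-$23 and $30.
 */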
		.macro	RESTORE_STATIC
		LONG_L	$16, PT_R16(sp)
		LONG_L	$17, PT_R17(sp)
		LONG_L	$18, PT_R18(sp)
		LONG_L	$19, PT_R19(sp)
		LONG_L	$20, PT_R20(sp)
		LONG_L	$21, PT_R21(sp)
		LONG_L	$22, PT_R22(sp)
		LONG_L	$23, PT_R23(sp)
		LONG_L	$30, PT_R30(sp)
		.endm

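/*
 * RESTORE_SOME and RESTORE_SP_AND_RET come in two flavours: on R3000-style
 * CPUs the return to the interrupted context is done with an rfe in the
 * delay slot of the jump back to EPC, on all other CPUs with eret.
 */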
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		li	v1, 0xff00
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
		LONG_L	$7,  PT_R7(sp)
		LONG_L	$6,  PT_R6(sp)
		LONG_L	$5,  PT_R5(sp)
		LONG_L	$4,  PT_R4(sp)
		LONG_L	$3,  PT_R3(sp)
		LONG_L	$2,  PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		LONG_L	sp, PT_R29(sp)
		jr	k0
		 rfe
		.set	pop
		.endm

#else
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips32r2
		/*
		 * We need to make sure the read-modify-write
		 * of Status below isn't perturbed by an interrupt
		 * or cross-TC access, so we need to do at least a DMT,
		 * protected by an interrupt-inhibit. But setting IXMT
		 * also creates a few-cycle window where an IPI could
		 * be queued and not be detected before potentially
		 * returning to a WAIT or user-mode loop. It must be
		 * replayed.
		 *
		 * We're in the middle of a context switch, and
		 * we can't dispatch it directly without trashing
		 * some registers, so we'll try to detect this unlikely
		 * case and program a software interrupt in the VPE,
		 * as would be done for a cross-VPE IPI.  To accommodate
		 * the handling of that case, we're doing a DVPE instead
		 * of just a DMT here to protect against other threads.
		 * This is a lot of cruft to cover a tiny window.
		 * If you can find a better design, implement it!
		 *
		 */
		mfc0	v0, CP0_TCSTATUS
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DVPE	5				# dvpe a1
		jal	mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, 0xff00
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
/*
 * Only after EXL/ERL have been restored to status can we
 * restore TCStatus.IXMT.
 */
		LONG_L	v1, PT_TCSTATUS(sp)
		_ehb
		mfc0	a0, CP0_TCSTATUS
		andi	v1, TCSTATUS_IXMT
		bnez	v1, 0f

/*
 * We'd like to detect any IPIs queued in the tiny window
 * above and request a software interrupt to service them
 * when we ERET.
 *
 * Computing the offset into the IPIQ array of the executing
 * TC's IPI queue in-line would be tedious.  We use part of
 * the TCContext register to hold 16 bits of offset that we
 * can add in-line to find the queue head.
 */
		mfc0	v0, CP0_TCCONTEXT
		la	a2, IPIQ
		srl	v0, v0, 16
		addu	a2, a2, v0
		LONG_L	v0, 0(a2)
		beqz	v0, 0f
/*
 * If we have a queue, provoke dispatch within the VPE by setting C_SW1
 */
		mfc0	v0, CP0_CAUSE
		ori	v0, v0, C_SW1
		mtc0	v0, CP0_CAUSE
0:
		/*
		 * This test should really never branch but
		 * let's be prudent here.  Having atomized
		 * the shared register modifications, we can
		 * now EVPE, and must do so before interrupts
		 * are potentially re-enabled.
		 */
		andi	a1, a1, MVPCONTROL_EVP
		beqz	a1, 1f
		evpe
1:
		/* We know that TCStatus.IXMT should be set from above */
		xori	a0, a0, TCSTATUS_IXMT
		or	a0, a0, v1
		mtc0	a0, CP0_TCSTATUS
		_ehb

		.set	mips0
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7,  PT_R7(sp)
		LONG_L	$6,  PT_R6(sp)
		LONG_L	$5,  PT_R5(sp)
		LONG_L	$4,  PT_R4(sp)
		LONG_L	$3,  PT_R3(sp)
		LONG_L	$2,  PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		LONG_L	sp, PT_R29(sp)
		.set	mips3
		eret
		.set	mips0
		.endm

#endif

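/*
 * Switch back to the stack pointer saved in the trap frame.
 */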
		.macro	RESTORE_SP
		LONG_L	sp, PT_R29(sp)
		.endm

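/*
 * Restore the complete register set; RESTORE_ALL_AND_RET additionally
 * returns to the interrupted context.
 */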
		.macro	RESTORE_ALL
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP
		.endm

		.macro	RESTORE_ALL_AND_RET
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP_AND_RET
		.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack
 */
		.macro	CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and disable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU, leave IXMT */
		xori	t0, 0x00001800
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL...*/
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL | ST0_ERL
		xori	t0, ST0_EXL | ST0_ERL
		mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack
 */
		.macro	STI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and enable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		_ehb
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU *and* IXMT */
		xori	t0, 0x00001c00
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL...*/
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL
		xori	t0, ST0_EXL
		mtc0	t0, CP0_STATUS
		/* irq_enable_hazard below should expand to EHB for 24K/34K CPUs */
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_enable_hazard
		.endm

/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack
 */
		.macro	KMODE
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * This gets baroque in SMTC.  We want to
		 * protect the non-atomic clearing of EXL
		 * with DMT/EMT, but we don't want to take
		 * an interrupt while DMT is still in effect.
		 */

		/* KMODE gets invoked from both reorder and noreorder code */
		.set	push
		.set	mips32r2
		.set	noreorder
		mfc0	v0, CP0_TCSTATUS
		andi	v1, v0, TCSTATUS_IXMT
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DMT	2				# dmt	v0
		/*
		 * We don't know a priori if ra is "live"
		 */
		move	t0, ra
		jal	mips_ihb
		nop	/* delay slot */
		move	ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		andi	t2, t0, ST0_IEP
		srl	t2, 2
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		_ehb
		andi	v0, v0, VPECONTROL_TE
		beqz	v0, 2f
		nop	/* delay slot */
		emt
2:
		mfc0	v0, CP0_TCSTATUS
		/* Clear IXMT, then OR in previous value */
		ori	v0, TCSTATUS_IXMT
		xori	v0, TCSTATUS_IXMT
		or	v0, v1, v0
		mtc0	v0, CP0_TCSTATUS
		/*
		 * irq_disable_hazard below should expand to EHB
		 * on 24K/34K CPUs
		 */
		.set	pop
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */