xref: /openbmc/linux/arch/mips/include/asm/stackframe.h (revision b04b4f78)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
7  * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
8  * Copyright (C) 1999 Silicon Graphics, Inc.
9  * Copyright (C) 2007  Maciej W. Rozycki
10  */
11 #ifndef _ASM_STACKFRAME_H
12 #define _ASM_STACKFRAME_H
13 
14 #include <linux/threads.h>
15 
16 #include <asm/asm.h>
17 #include <asm/asmmacro.h>
18 #include <asm/mipsregs.h>
19 #include <asm/asm-offsets.h>
20 
21 /*
22  * For SMTC kernel, global IE should be left set, and interrupts
23  * controlled exclusively via IXMT.
24  */
25 #ifdef CONFIG_MIPS_MT_SMTC
26 #define STATMASK 0x1e
27 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
28 #define STATMASK 0x3f
29 #else
30 #define STATMASK 0x1f
31 #endif
32 
33 #ifdef CONFIG_MIPS_MT_SMTC
34 #include <asm/mipsmtregs.h>
35 #endif /* CONFIG_MIPS_MT_SMTC */
36 
/*
 * Save the assembler temporary ($1/at) into the pt_regs frame on sp.
 * "noat" keeps the assembler from implicitly using $1 itself here.
 */
37 		.macro	SAVE_AT
38 		.set	push
39 		.set	noat
40 		LONG_S	$1, PT_R1(sp)
41 		.set	pop
42 		.endm
43 
/*
 * Save the multiplier state (HI/LO, plus ACX on SmartMIPS) and the
 * caller-saved temporary registers into the pt_regs frame on sp.
 * On 32-bit kernels $8/$9 are saved here as well; on 64-bit they
 * are argument registers and SAVE_SOME saves them.  Clobbers v1.
 */
44 		.macro	SAVE_TEMP
45 #ifdef CONFIG_CPU_HAS_SMARTMIPS
		/* Successive mflhxu reads deliver LO, HI and ACX in turn. */
46 		mflhxu	v1
47 		LONG_S	v1, PT_LO(sp)
48 		mflhxu	v1
49 		LONG_S	v1, PT_HI(sp)
50 		mflhxu	v1
51 		LONG_S	v1, PT_ACX(sp)
52 #else
53 		mfhi	v1
54 		LONG_S	v1, PT_HI(sp)
55 		mflo	v1
56 		LONG_S	v1, PT_LO(sp)
57 #endif
58 #ifdef CONFIG_32BIT
59 		LONG_S	$8, PT_R8(sp)
60 		LONG_S	$9, PT_R9(sp)
61 #endif
62 		LONG_S	$10, PT_R10(sp)
63 		LONG_S	$11, PT_R11(sp)
64 		LONG_S	$12, PT_R12(sp)
65 		LONG_S	$13, PT_R13(sp)
66 		LONG_S	$14, PT_R14(sp)
67 		LONG_S	$15, PT_R15(sp)
68 		LONG_S	$24, PT_R24(sp)
69 		.endm
70 
/*
 * Save the callee-saved (static) registers s0-s7 ($16-$23) and the
 * frame pointer ($30) into the pt_regs frame on sp.
 */
71 		.macro	SAVE_STATIC
72 		LONG_S	$16, PT_R16(sp)
73 		LONG_S	$17, PT_R17(sp)
74 		LONG_S	$18, PT_R18(sp)
75 		LONG_S	$19, PT_R19(sp)
76 		LONG_S	$20, PT_R20(sp)
77 		LONG_S	$21, PT_R21(sp)
78 		LONG_S	$22, PT_R22(sp)
79 		LONG_S	$23, PT_R23(sp)
80 		LONG_S	$30, PT_R30(sp)
81 		.endm
82 
83 #ifdef CONFIG_SMP
84 #ifdef CONFIG_MIPS_MT_SMTC
85 #define PTEBASE_SHIFT	19	/* TCBIND */
86 #else
87 #define PTEBASE_SHIFT	23	/* CONTEXT */
88 #endif
/*
 * get_saved_sp: load this CPU's saved kernel stack pointer into k1.
 * The CPU/TC index comes from the PTEBASE field of CP0 CONTEXT
 * (TCBIND on SMTC); shifting it right by PTEBASE_SHIFT turns it into
 * the byte offset of this CPU's entry in kernelsp[].  Clobbers k0.
 */
89 		.macro	get_saved_sp	/* SMP variation */
90 #ifdef CONFIG_MIPS_MT_SMTC
91 		mfc0	k0, CP0_TCBIND
92 #else
93 		MFC0	k0, CP0_CONTEXT
94 #endif
95 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
96 		lui	k1, %hi(kernelsp)
97 #else
		/* Build the full 64-bit address of kernelsp 16 bits at a time. */
98 		lui	k1, %highest(kernelsp)
99 		daddiu	k1, %higher(kernelsp)
100 		dsll	k1, 16
101 		daddiu	k1, %hi(kernelsp)
102 		dsll	k1, 16
103 #endif
104 		LONG_SRL	k0, PTEBASE_SHIFT
105 		LONG_ADDU	k1, k0
106 		LONG_L	k1, %lo(kernelsp)(k1)
107 		.endm
108 
/* set_saved_sp: record \stackp in this CPU's kernelsp[] slot. */
109 		.macro	set_saved_sp stackp temp temp2
110 #ifdef CONFIG_MIPS_MT_SMTC
111 		mfc0	\temp, CP0_TCBIND
112 #else
113 		MFC0	\temp, CP0_CONTEXT
114 #endif
115 		LONG_SRL	\temp, PTEBASE_SHIFT
116 		LONG_S	\stackp, kernelsp(\temp)
117 		.endm
118 #else
/* Uniprocessor: kernelsp is a single variable, no per-CPU indexing. */
119 		.macro	get_saved_sp	/* Uniprocessor variation */
120 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
121 		lui	k1, %hi(kernelsp)
122 #else
		/* Build the full 64-bit address of kernelsp 16 bits at a time. */
123 		lui	k1, %highest(kernelsp)
124 		daddiu	k1, %higher(kernelsp)
125 		dsll	k1, k1, 16
126 		daddiu	k1, %hi(kernelsp)
127 		dsll	k1, k1, 16
128 #endif
129 		LONG_L	k1, %lo(kernelsp)(k1)
130 		.endm
131 
132 		.macro	set_saved_sp stackp temp temp2
133 		LONG_S	\stackp, kernelsp
134 		.endm
135 #endif
136 
/*
 * Save the registers needed by every exception handler, switching to
 * the kernel stack (via get_saved_sp) when the exception was taken
 * from user mode (Status.CU0 clear).  On exit sp points at the new
 * pt_regs frame and $28 (gp) holds the _THREAD_MASK-aligned block
 * containing the old sp (the current thread_info).  Uses k0/k1 and
 * clobbers v1 (after saving it as $3).
 */
137 		.macro	SAVE_SOME
138 		.set	push
139 		.set	noat
140 		.set	reorder
141 		mfc0	k0, CP0_STATUS
142 		sll	k0, 3		/* extract cu0 bit */
143 		.set	noreorder
		/* CU0 set => already on the kernel stack: keep sp (in k1). */
144 		bltz	k0, 8f
145 		 move	k1, sp
146 		.set	reorder
147 		/* Called from user mode, new stack. */
148 		get_saved_sp
149 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
150 8:		move	k0, sp
151 		PTR_SUBU sp, k1, PT_SIZE
152 #else
153 		.set	at=k0
154 8:		PTR_SUBU k1, PT_SIZE
155 		.set	noat
156 		move	k0, sp
157 		move	sp, k1
158 #endif
159 		LONG_S	k0, PT_R29(sp)
160 		LONG_S	$3, PT_R3(sp)
161 		/*
162 		 * You might think that you don't need to save $0,
163 		 * but the FPU emulator and gdb remote debug stub
164 		 * need it to operate correctly
165 		 */
166 		LONG_S	$0, PT_R0(sp)
167 		mfc0	v1, CP0_STATUS
168 		LONG_S	$2, PT_R2(sp)
169 		LONG_S	v1, PT_STATUS(sp)
170 #ifdef CONFIG_MIPS_MT_SMTC
171 		/*
172 		 * Ideally, these instructions would be shuffled in
173 		 * to cover the pipeline delay.
174 		 */
175 		.set	mips32
176 		mfc0	v1, CP0_TCSTATUS
177 		.set	mips0
178 		LONG_S	v1, PT_TCSTATUS(sp)
179 #endif /* CONFIG_MIPS_MT_SMTC */
180 		LONG_S	$4, PT_R4(sp)
181 		mfc0	v1, CP0_CAUSE
182 		LONG_S	$5, PT_R5(sp)
183 		LONG_S	v1, PT_CAUSE(sp)
184 		LONG_S	$6, PT_R6(sp)
185 		MFC0	v1, CP0_EPC
186 		LONG_S	$7, PT_R7(sp)
187 #ifdef CONFIG_64BIT
188 		LONG_S	$8, PT_R8(sp)
189 		LONG_S	$9, PT_R9(sp)
190 #endif
191 		LONG_S	v1, PT_EPC(sp)
192 		LONG_S	$25, PT_R25(sp)
193 		LONG_S	$28, PT_R28(sp)
194 		LONG_S	$31, PT_R31(sp)
		/* gp = sp & ~_THREAD_MASK: base of the current thread_info. */
195 		ori	$28, sp, _THREAD_MASK
196 		xori	$28, _THREAD_MASK
197 #ifdef CONFIG_CPU_CAVIUM_OCTEON
198 		.set    mips64
199 		pref    0, 0($28)       /* Prefetch the current pointer */
200 		pref    0, PT_R31(sp)   /* Prefetch the $31(ra) */
201 		/* The Octeon multiplier state is affected by general multiply
202 		    instructions. Save it here, before any kernel code gets a
203 		    chance to corrupt it */
204 		jal     octeon_mult_save
205 		LONG_L  v1, 0($28)  /* Load the current pointer */
206 			 /* Restore $31(ra) that was changed by the jal */
207 		LONG_L  ra, PT_R31(sp)
208 		pref    0, 0(v1)    /* Prefetch the current thread */
209 #endif
210 		.set	pop
211 		.endm
212 
/* Save the complete register set into the pt_regs frame. */
213 		.macro	SAVE_ALL
214 		SAVE_SOME
215 		SAVE_AT
216 		SAVE_TEMP
217 		SAVE_STATIC
218 		.endm
219 
/*
 * Restore the assembler temporary ($1/at) from the pt_regs frame;
 * "noat" keeps the assembler from using $1 itself here.
 */
220 		.macro	RESTORE_AT
221 		.set	push
222 		.set	noat
223 		LONG_L	$1,  PT_R1(sp)
224 		.set	pop
225 		.endm
226 
/*
 * Restore the multiplier state (ACX/HI/LO on SmartMIPS, HI/LO
 * otherwise) and the temporary registers saved by SAVE_TEMP.
 * Clobbers $24 before finally reloading it from PT_R24.
 */
227 		.macro	RESTORE_TEMP
228 #ifdef CONFIG_CPU_HAS_SMARTMIPS
		/* Successive mtlhx writes push the values back as ACX, HI, LO. */
229 		LONG_L	$24, PT_ACX(sp)
230 		mtlhx	$24
231 		LONG_L	$24, PT_HI(sp)
232 		mtlhx	$24
233 		LONG_L	$24, PT_LO(sp)
234 		mtlhx	$24
235 #else
236 		LONG_L	$24, PT_LO(sp)
237 		mtlo	$24
238 		LONG_L	$24, PT_HI(sp)
239 		mthi	$24
240 #endif
241 #ifdef CONFIG_32BIT
242 		LONG_L	$8, PT_R8(sp)
243 		LONG_L	$9, PT_R9(sp)
244 #endif
245 		LONG_L	$10, PT_R10(sp)
246 		LONG_L	$11, PT_R11(sp)
247 		LONG_L	$12, PT_R12(sp)
248 		LONG_L	$13, PT_R13(sp)
249 		LONG_L	$14, PT_R14(sp)
250 		LONG_L	$15, PT_R15(sp)
251 		LONG_L	$24, PT_R24(sp)
252 		.endm
253 
/*
 * Restore the callee-saved registers s0-s7 ($16-$23) and the frame
 * pointer ($30) saved by SAVE_STATIC.
 */
254 		.macro	RESTORE_STATIC
255 		LONG_L	$16, PT_R16(sp)
256 		LONG_L	$17, PT_R17(sp)
257 		LONG_L	$18, PT_R18(sp)
258 		LONG_L	$19, PT_R19(sp)
259 		LONG_L	$20, PT_R20(sp)
260 		LONG_L	$21, PT_R21(sp)
261 		LONG_L	$22, PT_R22(sp)
262 		LONG_L	$23, PT_R23(sp)
263 		LONG_L	$30, PT_R30(sp)
264 		.endm
265 
266 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
267 
/*
 * R3000/TX39 variant: force the STATMASK bits of Status clear, then
 * restore the saved PT_STATUS value - keeping the live interrupt
 * mask bits (0xff00) - and reload the registers saved by SAVE_SOME.
 * Clobbers a0, v0 and v1.
 */
268 		.macro	RESTORE_SOME
269 		.set	push
270 		.set	reorder
271 		.set	noat
272 		mfc0	a0, CP0_STATUS
273 		li	v1, 0xff00
274 		ori	a0, STATMASK
275 		xori	a0, STATMASK
276 		mtc0	a0, CP0_STATUS
		/* Merge: live IM bits (a0 & 0xff00) into saved Status. */
277 		and	a0, v1
278 		LONG_L	v0, PT_STATUS(sp)
279 		nor	v1, $0, v1
280 		and	v0, v1
281 		or	v0, a0
282 		mtc0	v0, CP0_STATUS
283 		LONG_L	$31, PT_R31(sp)
284 		LONG_L	$28, PT_R28(sp)
285 		LONG_L	$25, PT_R25(sp)
286 		LONG_L	$7,  PT_R7(sp)
287 		LONG_L	$6,  PT_R6(sp)
288 		LONG_L	$5,  PT_R5(sp)
289 		LONG_L	$4,  PT_R4(sp)
290 		LONG_L	$3,  PT_R3(sp)
291 		LONG_L	$2,  PT_R2(sp)
292 		.set	pop
293 		.endm
294 
/*
 * R3000-style exception return: jump to the saved EPC with an rfe
 * in the delay slot to restore the previous kernel/interrupt state.
 * sp is reloaded from the frame first, so the frame must be intact.
 */
295 		.macro	RESTORE_SP_AND_RET
296 		.set	push
297 		.set	noreorder
298 		LONG_L	k0, PT_EPC(sp)
299 		LONG_L	sp, PT_R29(sp)
300 		jr	k0
301 		 rfe
302 		.set	pop
303 		.endm
304 
305 #else
/*
 * Generic variant: restore Status, EPC and the registers saved by
 * SAVE_SOME.  The STATMASK bits of Status are first forced clear,
 * then the saved PT_STATUS value is merged back in - except for the
 * interrupt mask bits (0xff00), which are kept from the live Status
 * register.  Clobbers a0, v0, v1 (and a1/a2 under SMTC) before the
 * final register reloads.
 */
306 		.macro	RESTORE_SOME
307 		.set	push
308 		.set	reorder
309 		.set	noat
310 #ifdef CONFIG_MIPS_MT_SMTC
311 		.set	mips32r2
312 		/*
313 		 * We need to make sure the read-modify-write
314 		 * of Status below isn't perturbed by an interrupt
315 		 * or cross-TC access, so we need to do at least a DMT,
316 		 * protected by an interrupt-inhibit. But setting IXMT
317 		 * also creates a few-cycle window where an IPI could
318 		 * be queued and not be detected before potentially
319 		 * returning to a WAIT or user-mode loop. It must be
320 		 * replayed.
321 		 *
322 		 * We're in the middle of a context switch, and
323 		 * we can't dispatch it directly without trashing
324 		 * some registers, so we'll try to detect this unlikely
325 		 * case and program a software interrupt in the VPE,
326 		 * as would be done for a cross-VPE IPI.  To accommodate
327 		 * the handling of that case, we're doing a DVPE instead
328 		 * of just a DMT here to protect against other threads.
329 		 * This is a lot of cruft to cover a tiny window.
330 		 * If you can find a better design, implement it!
331 		 *
332 		 */
333 		mfc0	v0, CP0_TCSTATUS
334 		ori	v0, TCSTATUS_IXMT
335 		mtc0	v0, CP0_TCSTATUS
336 		_ehb
337 		DVPE	5				# dvpe a1
338 		jal	mips_ihb
339 #endif /* CONFIG_MIPS_MT_SMTC */
340 #ifdef CONFIG_CPU_CAVIUM_OCTEON
341 		/* Restore the Octeon multiplier state */
342 		jal	octeon_mult_restore
343 #endif
344 		mfc0	a0, CP0_STATUS
345 		ori	a0, STATMASK
346 		xori	a0, STATMASK
347 		mtc0	a0, CP0_STATUS
		/* Merge: live IM bits (a0 & 0xff00) into the saved Status. */
348 		li	v1, 0xff00
349 		and	a0, v1
350 		LONG_L	v0, PT_STATUS(sp)
351 		nor	v1, $0, v1
352 		and	v0, v1
353 		or	v0, a0
354 		mtc0	v0, CP0_STATUS
355 #ifdef CONFIG_MIPS_MT_SMTC
356 /*
357  * Only after EXL/ERL have been restored to status can we
358  * restore TCStatus.IXMT.
359  */
360 		LONG_L	v1, PT_TCSTATUS(sp)
361 		_ehb
362 		mfc0	a0, CP0_TCSTATUS
363 		andi	v1, TCSTATUS_IXMT
364 		bnez	v1, 0f
365 
366 /*
367  * We'd like to detect any IPIs queued in the tiny window
368  * above and request a software interrupt to service them
369  * when we ERET.
370  *
371  * Computing the offset into the IPIQ array of the executing
372  * TC's IPI queue in-line would be tedious.  We use part of
373  * the TCContext register to hold 16 bits of offset that we
374  * can add in-line to find the queue head.
375  */
376 		mfc0	v0, CP0_TCCONTEXT
377 		la	a2, IPIQ
378 		srl	v0, v0, 16
379 		addu	a2, a2, v0
380 		LONG_L	v0, 0(a2)
381 		beqz	v0, 0f
382 /*
383  * If we have a queue, provoke dispatch within the VPE by setting C_SW1
384  */
385 		mfc0	v0, CP0_CAUSE
386 		ori	v0, v0, C_SW1
387 		mtc0	v0, CP0_CAUSE
388 0:
389 		/*
390 		 * This test should really never branch but
391 		 * let's be prudent here.  Having atomized
392 		 * the shared register modifications, we can
393 		 * now EVPE, and must do so before interrupts
394 		 * are potentially re-enabled.
395 		 */
396 		andi	a1, a1, MVPCONTROL_EVP
397 		beqz	a1, 1f
398 		evpe
399 1:
400 		/* We know that TCStatus.IXMT should be set from above */
401 		xori	a0, a0, TCSTATUS_IXMT
402 		or	a0, a0, v1
403 		mtc0	a0, CP0_TCSTATUS
404 		_ehb
405 
406 		.set	mips0
407 #endif /* CONFIG_MIPS_MT_SMTC */
408 		LONG_L	v1, PT_EPC(sp)
409 		MTC0	v1, CP0_EPC
410 		LONG_L	$31, PT_R31(sp)
411 		LONG_L	$28, PT_R28(sp)
412 		LONG_L	$25, PT_R25(sp)
413 #ifdef CONFIG_64BIT
414 		LONG_L	$8, PT_R8(sp)
415 		LONG_L	$9, PT_R9(sp)
416 #endif
417 		LONG_L	$7,  PT_R7(sp)
418 		LONG_L	$6,  PT_R6(sp)
419 		LONG_L	$5,  PT_R5(sp)
420 		LONG_L	$4,  PT_R4(sp)
421 		LONG_L	$3,  PT_R3(sp)
422 		LONG_L	$2,  PT_R2(sp)
423 		.set	pop
424 		.endm
425 
/*
 * Reload the pre-exception stack pointer and return with ERET,
 * using the EPC/Status state set up by RESTORE_SOME.
 */
426 		.macro	RESTORE_SP_AND_RET
427 		LONG_L	sp, PT_R29(sp)
428 		.set	mips3
429 		eret
430 		.set	mips0
431 		.endm
432 
433 #endif
434 
/* Reload the pre-exception stack pointer from the pt_regs frame. */
435 		.macro	RESTORE_SP
436 		LONG_L	sp, PT_R29(sp)
437 		.endm
438 
/*
 * Restore everything saved by SAVE_ALL.  RESTORE_SP must come last:
 * the preceding macros still index the pt_regs frame through sp.
 */
439 		.macro	RESTORE_ALL
440 		RESTORE_TEMP
441 		RESTORE_STATIC
442 		RESTORE_AT
443 		RESTORE_SOME
444 		RESTORE_SP
445 		.endm
446 
/*
 * Restore everything saved by SAVE_ALL, then return from the
 * exception.  RESTORE_SP_AND_RET must come last: the preceding
 * macros still index the pt_regs frame through sp.
 */
447 		.macro	RESTORE_ALL_AND_RET
448 		RESTORE_TEMP
449 		RESTORE_STATIC
450 		RESTORE_AT
451 		RESTORE_SOME
452 		RESTORE_SP_AND_RET
453 		.endm
454 
455 /*
456  * Move to kernel mode and disable interrupts.
457  * Set cp0 enable bit as sign that we're running on the kernel stack
458  */
459 		.macro	CLI	# clobbers t0, t1
460 #if !defined(CONFIG_MIPS_MT_SMTC)
461 		mfc0	t0, CP0_STATUS
462 		li	t1, ST0_CU0 | STATMASK
463 		or	t0, t1
464 		xori	t0, STATMASK	# clear all STATMASK bits, incl. IE
465 		mtc0	t0, CP0_STATUS
466 #else /* CONFIG_MIPS_MT_SMTC */
467 		/*
468 		 * For SMTC, we need to set privilege
469 		 * and disable interrupts only for the
470 		 * current TC, using the TCStatus register.
471 		 */
472 		mfc0	t0, CP0_TCSTATUS
473 		/* Fortunately CU 0 is in the same place in both registers */
474 		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
475 		li	t1, ST0_CU0 | 0x08001c00
476 		or	t0, t1
477 		/* Clear TKSU, leave IXMT */
478 		xori	t0, 0x00001800
479 		mtc0	t0, CP0_TCSTATUS
480 		_ehb
481 		/* We need to leave the global IE bit set, but clear EXL...*/
482 		mfc0	t0, CP0_STATUS
483 		ori	t0, ST0_EXL | ST0_ERL
484 		xori	t0, ST0_EXL | ST0_ERL
485 		mtc0	t0, CP0_STATUS
486 #endif /* CONFIG_MIPS_MT_SMTC */
487 		irq_disable_hazard
488 		.endm
489 
490 /*
491  * Move to kernel mode and enable interrupts.
492  * Set cp0 enable bit as sign that we're running on the kernel stack
493  */
494 		.macro	STI	# clobbers t0, t1
495 #if !defined(CONFIG_MIPS_MT_SMTC)
496 		mfc0	t0, CP0_STATUS
497 		li	t1, ST0_CU0 | STATMASK
498 		or	t0, t1
499 		xori	t0, STATMASK & ~1	# clear STATMASK except IE (bit 0)
500 		mtc0	t0, CP0_STATUS
501 #else /* CONFIG_MIPS_MT_SMTC */
502 		/*
503 		 * For SMTC, we need to set privilege
504 		 * and enable interrupts only for the
505 		 * current TC, using the TCStatus register.
506 		 */
507 		_ehb
508 		mfc0	t0, CP0_TCSTATUS
509 		/* Fortunately CU 0 is in the same place in both registers */
510 		/* Set TCU0, TKSU (for later inversion) and IXMT */
511 		li	t1, ST0_CU0 | 0x08001c00
512 		or	t0, t1
513 		/* Clear TKSU *and* IXMT */
514 		xori	t0, 0x00001c00
515 		mtc0	t0, CP0_TCSTATUS
516 		_ehb
517 		/* We need to leave the global IE bit set, but clear EXL...*/
518 		mfc0	t0, CP0_STATUS
519 		ori	t0, ST0_EXL
520 		xori	t0, ST0_EXL
521 		mtc0	t0, CP0_STATUS
522 		/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
523 #endif /* CONFIG_MIPS_MT_SMTC */
524 		irq_enable_hazard
525 		.endm
526 
527 /*
528  * Just move to kernel mode and leave interrupts as they are.  Note
529  * for the R3000 this means copying the previous enable from IEp.
530  * Set cp0 enable bit as sign that we're running on the kernel stack
531  */
532 		.macro	KMODE	# clobbers t0, t1 (t2 on R3000; v0, v1 on SMTC)
533 #ifdef CONFIG_MIPS_MT_SMTC
534 		/*
535 		 * This gets baroque in SMTC.  We want to
536 		 * protect the non-atomic clearing of EXL
537 		 * with DMT/EMT, but we don't want to take
538 		 * an interrupt while DMT is still in effect.
539 		 */
540 
541 		/* KMODE gets invoked from both reorder and noreorder code */
542 		.set	push
543 		.set	mips32r2
544 		.set	noreorder
545 		mfc0	v0, CP0_TCSTATUS
546 		andi	v1, v0, TCSTATUS_IXMT
547 		ori	v0, TCSTATUS_IXMT
548 		mtc0	v0, CP0_TCSTATUS
549 		_ehb
550 		DMT	2				# dmt	v0
551 		/*
552 		 * We don't know a priori if ra is "live"
553 		 */
554 		move	t0, ra
555 		jal	mips_ihb
556 		nop	/* delay slot */
557 		move	ra, t0
558 #endif /* CONFIG_MIPS_MT_SMTC */
559 		mfc0	t0, CP0_STATUS
560 		li	t1, ST0_CU0 | (STATMASK & ~1)
561 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		/* Copy the previous interrupt enable (IEp) down into IEc. */
562 		andi	t2, t0, ST0_IEP
563 		srl	t2, 2
564 		or	t0, t2
565 #endif
566 		or	t0, t1
567 		xori	t0, STATMASK & ~1	# leave bit 0 (IE) as computed
568 		mtc0	t0, CP0_STATUS
569 #ifdef CONFIG_MIPS_MT_SMTC
570 		_ehb
571 		andi	v0, v0, VPECONTROL_TE
572 		beqz	v0, 2f
573 		nop	/* delay slot */
574 		emt
575 2:
576 		mfc0	v0, CP0_TCSTATUS
577 		/* Clear IXMT, then OR in previous value */
578 		ori	v0, TCSTATUS_IXMT
579 		xori	v0, TCSTATUS_IXMT
580 		or	v0, v1, v0
581 		mtc0	v0, CP0_TCSTATUS
582 		/*
583 		 * irq_disable_hazard below should expand to EHB
584 		 * on 24K/34K CPUS
585 		 */
586 		.set pop
587 #endif /* CONFIG_MIPS_MT_SMTC */
588 		irq_disable_hazard
589 		.endm
590 
591 #endif /* _ASM_STACKFRAME_H */
592