xref: /openbmc/linux/arch/mips/include/asm/stackframe.h (revision 8dfdd02a)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
7  * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
8  * Copyright (C) 1999 Silicon Graphics, Inc.
9  * Copyright (C) 2007  Maciej W. Rozycki
10  */
11 #ifndef _ASM_STACKFRAME_H
12 #define _ASM_STACKFRAME_H
13 
14 #include <linux/threads.h>
15 
16 #include <asm/asm.h>
17 #include <asm/asmmacro.h>
18 #include <asm/mipsregs.h>
19 #include <asm/asm-offsets.h>
20 
21 /*
22  * For SMTC kernel, global IE should be left set, and interrupts
23  * controlled exclusively via IXMT.
24  */
25 #ifdef CONFIG_MIPS_MT_SMTC
/* EXL, ERL and KSU -- bit 0 (IE) deliberately excluded, see above */
26 #define STATMASK 0x1e
27 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
/* R3000-style Status: the KUo/IEo/KUp/IEp/KUc/IEc stack, bits 5..0 */
28 #define STATMASK 0x3f
29 #else
/* IE, EXL, ERL and KSU, bits 4..0 */
30 #define STATMASK 0x1f
31 #endif
32 
33 #ifdef CONFIG_MIPS_MT_SMTC
34 #include <asm/mipsmtregs.h>
35 #endif /* CONFIG_MIPS_MT_SMTC */
36 
		/*
		 * SAVE_AT: save the assembler-temporary register $1 (at)
		 * into the pt_regs frame pointed to by sp.  noat is needed
		 * because $1 is referenced explicitly.
		 */
37 		.macro	SAVE_AT
38 		.set	push
39 		.set	noat
40 		LONG_S	$1, PT_R1(sp)
41 		.set	pop
42 		.endm
43 
		/*
		 * SAVE_TEMP: save the remaining caller-saved temporaries
		 * ($10-$15, $24; plus $8/$9 on 32-bit, where SAVE_SOME did
		 * not save them) and the multiplier state (HI/LO, or the
		 * SmartMIPS ACX/HI/LO stack) into pt_regs.  Uses v1 ($3)
		 * as scratch, so SAVE_SOME must already have run.
		 */
44 		.macro	SAVE_TEMP
45 #ifdef CONFIG_CPU_HAS_SMARTMIPS
		/* each mflhxu pops one entry of the LO/HI/ACX stack */
46 		mflhxu	v1
47 		LONG_S	v1, PT_LO(sp)
48 		mflhxu	v1
49 		LONG_S	v1, PT_HI(sp)
50 		mflhxu	v1
51 		LONG_S	v1, PT_ACX(sp)
52 #else
		/* read HI early; the store is deferred below to hide latency */
53 		mfhi	v1
54 #endif
55 #ifdef CONFIG_32BIT
56 		LONG_S	$8, PT_R8(sp)
57 		LONG_S	$9, PT_R9(sp)
58 #endif
59 		LONG_S	$10, PT_R10(sp)
60 		LONG_S	$11, PT_R11(sp)
61 		LONG_S	$12, PT_R12(sp)
62 #ifndef CONFIG_CPU_HAS_SMARTMIPS
63 		LONG_S	v1, PT_HI(sp)
64 		mflo	v1
65 #endif
66 		LONG_S	$13, PT_R13(sp)
67 		LONG_S	$14, PT_R14(sp)
68 		LONG_S	$15, PT_R15(sp)
69 		LONG_S	$24, PT_R24(sp)
70 #ifndef CONFIG_CPU_HAS_SMARTMIPS
71 		LONG_S	v1, PT_LO(sp)
72 #endif
73 #ifdef CONFIG_CPU_CAVIUM_OCTEON
74 		/*
75 		 * The Octeon multiplier state is affected by general
76 		 * multiply instructions. It must be saved before any
77 		 * kernel code might corrupt it.
78 		 */
79 		jal     octeon_mult_save
80 #endif
81 		.endm
82 
		/*
		 * SAVE_STATIC: save the callee-saved registers s0-s7
		 * ($16-$23) and s8/fp ($30) into pt_regs.  Only needed
		 * when the handler may context-switch or inspect them.
		 */
83 		.macro	SAVE_STATIC
84 		LONG_S	$16, PT_R16(sp)
85 		LONG_S	$17, PT_R17(sp)
86 		LONG_S	$18, PT_R18(sp)
87 		LONG_S	$19, PT_R19(sp)
88 		LONG_S	$20, PT_R20(sp)
89 		LONG_S	$21, PT_R21(sp)
90 		LONG_S	$22, PT_R22(sp)
91 		LONG_S	$23, PT_R23(sp)
92 		LONG_S	$30, PT_R30(sp)
93 		.endm
94 
95 #ifdef CONFIG_SMP
96 #ifdef CONFIG_MIPS_MT_SMTC
97 #define PTEBASE_SHIFT	19	/* TCBIND */
98 #define CPU_ID_REG CP0_TCBIND
99 #define CPU_ID_MFC0 mfc0
100 #elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
101 #define PTEBASE_SHIFT	48	/* XCONTEXT */
102 #define CPU_ID_REG CP0_XCONTEXT
103 #define CPU_ID_MFC0 MFC0
104 #else
105 #define PTEBASE_SHIFT	23	/* CONTEXT */
106 #define CPU_ID_REG CP0_CONTEXT
107 #define CPU_ID_MFC0 MFC0
108 #endif
		/*
		 * get_saved_sp: load this CPU's saved kernel stack pointer
		 * into k1.  The CPU (or TC) id is read from CPU_ID_REG and
		 * shifted down by PTEBASE_SHIFT so that it becomes the byte
		 * offset of this CPU's entry in the kernelsp[] array.
		 * Clobbers k0 and k1 only (safe on exception entry).
		 */
109 		.macro	get_saved_sp	/* SMP variation */
110 		CPU_ID_MFC0	k0, CPU_ID_REG
111 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
112 		lui	k1, %hi(kernelsp)
113 #else
		/* build the full 64-bit address of kernelsp piecewise */
114 		lui	k1, %highest(kernelsp)
115 		daddiu	k1, %higher(kernelsp)
116 		dsll	k1, 16
117 		daddiu	k1, %hi(kernelsp)
118 		dsll	k1, 16
119 #endif
120 		LONG_SRL	k0, PTEBASE_SHIFT
121 		LONG_ADDU	k1, k0
122 		LONG_L	k1, %lo(kernelsp)(k1)
123 		.endm
124 
		/*
		 * set_saved_sp: record \stackp as this CPU's kernel stack
		 * pointer.  \temp is clobbered; \temp2 is unused here and
		 * kept only so both variants share one interface.
		 */
125 		.macro	set_saved_sp stackp temp temp2
126 		CPU_ID_MFC0	\temp, CPU_ID_REG
127 		LONG_SRL	\temp, PTEBASE_SHIFT
128 		LONG_S	\stackp, kernelsp(\temp)
129 		.endm
130 #else
		/* UP: a single kernelsp variable, loaded into k1 */
131 		.macro	get_saved_sp	/* Uniprocessor variation */
132 #ifdef CONFIG_CPU_JUMP_WORKAROUNDS
133 		/*
134 		 * Clear BTB (branch target buffer), forbid RAS (return address
135 		 * stack) to workaround the Out-of-order Issue in Loongson2F
136 		 * via its diagnostic register.
137 		 */
138 		move	k0, ra
139 		jal	1f
140 		 nop
141 1:		jal	1f
142 		 nop
143 1:		jal	1f
144 		 nop
145 1:		jal	1f
146 		 nop
147 1:		move	ra, k0
148 		li	k0, 3
149 		mtc0	k0, $22		/* Loongson2F diagnostic register */
150 #endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
151 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
152 		lui	k1, %hi(kernelsp)
153 #else
		/* build the full 64-bit address of kernelsp piecewise */
154 		lui	k1, %highest(kernelsp)
155 		daddiu	k1, %higher(kernelsp)
156 		dsll	k1, k1, 16
157 		daddiu	k1, %hi(kernelsp)
158 		dsll	k1, k1, 16
159 #endif
160 		LONG_L	k1, %lo(kernelsp)(k1)
161 		.endm
162 
		/* UP set_saved_sp: \temp and \temp2 are unused */
163 		.macro	set_saved_sp stackp temp temp2
164 		LONG_S	\stackp, kernelsp
165 		.endm
166 #endif
167 
		/*
		 * SAVE_SOME: exception-entry register save.  Chooses between
		 * the current stack (kernel mode) and this CPU's saved kernel
		 * stack (user mode), allocates a struct pt_regs, and stores
		 * the registers every handler needs: zero, v0/v1, a0-a3, t9,
		 * gp, sp, ra, plus Status, Cause and EPC ($8/$9 too on
		 * 64-bit).  On exit sp points at the pt_regs frame and $28
		 * holds the current thread_info pointer.
		 */
168 		.macro	SAVE_SOME
169 		.set	push
170 		.set	noat
171 		.set	reorder
172 		mfc0	k0, CP0_STATUS
173 		sll	k0, 3		/* extract cu0 bit */
174 		.set	noreorder
		/*
		 * CU0 set is the "running on kernel stack" marker (see the
		 * CLI/STI comments below): if set, keep the current sp;
		 * otherwise fetch the saved kernel sp.
		 */
175 		bltz	k0, 8f
176 		 move	k1, sp
177 		.set	reorder
178 		/* Called from user mode, new stack. */
179 		get_saved_sp
180 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
181 8:		move	k0, sp
182 		PTR_SUBU sp, k1, PT_SIZE
183 #else
		/* PTR_SUBU may expand to need AT here, so lend it k0 */
184 		.set	at=k0
185 8:		PTR_SUBU k1, PT_SIZE
186 		.set	noat
187 		move	k0, sp
188 		move	sp, k1
189 #endif
190 		LONG_S	k0, PT_R29(sp)		/* original sp */
191 		LONG_S	$3, PT_R3(sp)
192 		/*
193 		 * You might think that you don't need to save $0,
194 		 * but the FPU emulator and gdb remote debug stub
195 		 * need it to operate correctly
196 		 */
197 		LONG_S	$0, PT_R0(sp)
198 		mfc0	v1, CP0_STATUS
199 		LONG_S	$2, PT_R2(sp)
200 		LONG_S	v1, PT_STATUS(sp)
201 #ifdef CONFIG_MIPS_MT_SMTC
202 		/*
203 		 * Ideally, these instructions would be shuffled in
204 		 * to cover the pipeline delay.
205 		 */
206 		.set	mips32
207 		mfc0	k0, CP0_TCSTATUS
208 		.set	mips0
209 		LONG_S	k0, PT_TCSTATUS(sp)
210 #endif /* CONFIG_MIPS_MT_SMTC */
211 		LONG_S	$4, PT_R4(sp)
212 		mfc0	v1, CP0_CAUSE
213 		LONG_S	$5, PT_R5(sp)
214 		LONG_S	v1, PT_CAUSE(sp)
215 		LONG_S	$6, PT_R6(sp)
216 		MFC0	v1, CP0_EPC
217 		LONG_S	$7, PT_R7(sp)
218 #ifdef CONFIG_64BIT
219 		LONG_S	$8, PT_R8(sp)
220 		LONG_S	$9, PT_R9(sp)
221 #endif
222 		LONG_S	v1, PT_EPC(sp)
223 		LONG_S	$25, PT_R25(sp)
224 		LONG_S	$28, PT_R28(sp)
225 		LONG_S	$31, PT_R31(sp)
		/* or-then-xor rounds sp down to the enclosing thread_info */
226 		ori	$28, sp, _THREAD_MASK
227 		xori	$28, _THREAD_MASK
228 #ifdef CONFIG_CPU_CAVIUM_OCTEON
229 		.set    mips64
230 		pref    0, 0($28)       /* Prefetch the current pointer */
231 #endif
232 		.set	pop
233 		.endm
234 
		/* SAVE_ALL: save the complete register set into pt_regs */
235 		.macro	SAVE_ALL
236 		SAVE_SOME
237 		SAVE_AT
238 		SAVE_TEMP
239 		SAVE_STATIC
240 		.endm
241 
		/* RESTORE_AT: reload $1 (at) from pt_regs; inverse of SAVE_AT */
242 		.macro	RESTORE_AT
243 		.set	push
244 		.set	noat
245 		LONG_L	$1,  PT_R1(sp)
246 		.set	pop
247 		.endm
248 
		/*
		 * RESTORE_TEMP: inverse of SAVE_TEMP.  Restores the
		 * multiplier state and the caller-saved temporaries;
		 * $24 doubles as scratch and is reloaded last.
		 */
249 		.macro	RESTORE_TEMP
250 #ifdef CONFIG_CPU_CAVIUM_OCTEON
251 		/* Restore the Octeon multiplier state */
252 		jal	octeon_mult_restore
253 #endif
254 #ifdef CONFIG_CPU_HAS_SMARTMIPS
		/* mtlhx pushes, so refill the stack in reverse: ACX, HI, LO */
255 		LONG_L	$24, PT_ACX(sp)
256 		mtlhx	$24
257 		LONG_L	$24, PT_HI(sp)
258 		mtlhx	$24
259 		LONG_L	$24, PT_LO(sp)
260 		mtlhx	$24
261 #else
262 		LONG_L	$24, PT_LO(sp)
263 		mtlo	$24
264 		LONG_L	$24, PT_HI(sp)
265 		mthi	$24
266 #endif
267 #ifdef CONFIG_32BIT
		/* on 32-bit, $8/$9 were saved by SAVE_TEMP, not SAVE_SOME */
268 		LONG_L	$8, PT_R8(sp)
269 		LONG_L	$9, PT_R9(sp)
270 #endif
271 		LONG_L	$10, PT_R10(sp)
272 		LONG_L	$11, PT_R11(sp)
273 		LONG_L	$12, PT_R12(sp)
274 		LONG_L	$13, PT_R13(sp)
275 		LONG_L	$14, PT_R14(sp)
276 		LONG_L	$15, PT_R15(sp)
277 		LONG_L	$24, PT_R24(sp)
278 		.endm
279 
		/* RESTORE_STATIC: reload s0-s7 and s8/fp; inverse of SAVE_STATIC */
280 		.macro	RESTORE_STATIC
281 		LONG_L	$16, PT_R16(sp)
282 		LONG_L	$17, PT_R17(sp)
283 		LONG_L	$18, PT_R18(sp)
284 		LONG_L	$19, PT_R19(sp)
285 		LONG_L	$20, PT_R20(sp)
286 		LONG_L	$21, PT_R21(sp)
287 		LONG_L	$22, PT_R22(sp)
288 		LONG_L	$23, PT_R23(sp)
289 		LONG_L	$30, PT_R30(sp)
290 		.endm
291 
292 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
293 
		/*
		 * RESTORE_SOME (R3000/TX39 variant): rebuild Status and
		 * reload the registers SAVE_SOME stored.  Status is rebuilt
		 * in two steps: first the live Status has its STATMASK bits
		 * cleared, then the saved Status image is merged in with the
		 * live interrupt-mask bits (0xff00) taking precedence over
		 * the saved ones.
		 */
294 		.macro	RESTORE_SOME
295 		.set	push
296 		.set	reorder
297 		.set	noat
298 		mfc0	a0, CP0_STATUS
299 		li	v1, 0xff00
300 		ori	a0, STATMASK
301 		xori	a0, STATMASK
302 		mtc0	a0, CP0_STATUS
303 		and	a0, v1			/* a0 = live IM bits */
304 		LONG_L	v0, PT_STATUS(sp)
305 		nor	v1, $0, v1		/* v1 = ~0xff00 */
306 		and	v0, v1
307 		or	v0, a0
308 		mtc0	v0, CP0_STATUS
309 		LONG_L	$31, PT_R31(sp)
310 		LONG_L	$28, PT_R28(sp)
311 		LONG_L	$25, PT_R25(sp)
312 		LONG_L	$7,  PT_R7(sp)
313 		LONG_L	$6,  PT_R6(sp)
314 		LONG_L	$5,  PT_R5(sp)
315 		LONG_L	$4,  PT_R4(sp)
316 		LONG_L	$3,  PT_R3(sp)
317 		LONG_L	$2,  PT_R2(sp)
318 		.set	pop
319 		.endm
320 
		/*
		 * RESTORE_SP_AND_RET (R3000/TX39): jump back to EPC with
		 * rfe in the delay slot, which pops the R3000 KU/IE stack
		 * to restore the pre-exception mode.
		 */
321 		.macro	RESTORE_SP_AND_RET
322 		.set	push
323 		.set	noreorder
324 		LONG_L	k0, PT_EPC(sp)
325 		LONG_L	sp, PT_R29(sp)
326 		jr	k0
327 		 rfe
328 		.set	pop
329 		.endm
330 
331 #else
		/*
		 * RESTORE_SOME (generic variant): same two-step Status
		 * rebuild as above; on SMTC kernels it is additionally
		 * bracketed by IXMT/DVPE protection, see the comments below.
		 */
332 		.macro	RESTORE_SOME
333 		.set	push
334 		.set	reorder
335 		.set	noat
336 #ifdef CONFIG_MIPS_MT_SMTC
337 		.set	mips32r2
338 		/*
339 		 * We need to make sure the read-modify-write
340 		 * of Status below isn't perturbed by an interrupt
341 		 * or cross-TC access, so we need to do at least a DMT,
342 		 * protected by an interrupt-inhibit. But setting IXMT
343 		 * also creates a few-cycle window where an IPI could
344 		 * be queued and not be detected before potentially
345 		 * returning to a WAIT or user-mode loop. It must be
346 		 * replayed.
347 		 *
348 		 * We're in the middle of a context switch, and
349 		 * we can't dispatch it directly without trashing
350 		 * some registers, so we'll try to detect this unlikely
351 		 * case and program a software interrupt in the VPE,
352 		 * as would be done for a cross-VPE IPI.  To accommodate
353 		 * the handling of that case, we're doing a DVPE instead
354 		 * of just a DMT here to protect against other threads.
355 		 * This is a lot of cruft to cover a tiny window.
356 		 * If you can find a better design, implement it!
357 		 *
358 		 */
359 		mfc0	v0, CP0_TCSTATUS
360 		ori	v0, TCSTATUS_IXMT
361 		mtc0	v0, CP0_TCSTATUS
362 		_ehb
363 		DVPE	5				# dvpe a1
364 		jal	mips_ihb
365 #endif /* CONFIG_MIPS_MT_SMTC */
366 		mfc0	a0, CP0_STATUS
367 		ori	a0, STATMASK
368 		xori	a0, STATMASK
369 		mtc0	a0, CP0_STATUS
370 		li	v1, 0xff00
371 		and	a0, v1			/* a0 = live IM bits */
372 		LONG_L	v0, PT_STATUS(sp)
373 		nor	v1, $0, v1		/* v1 = ~0xff00 */
374 		and	v0, v1
375 		or	v0, a0
376 		mtc0	v0, CP0_STATUS
377 #ifdef CONFIG_MIPS_MT_SMTC
378 /*
379  * Only after EXL/ERL have been restored to status can we
380  * restore TCStatus.IXMT.
381  */
382 		LONG_L	v1, PT_TCSTATUS(sp)
383 		_ehb
384 		mfc0	a0, CP0_TCSTATUS
385 		andi	v1, TCSTATUS_IXMT
386 		bnez	v1, 0f
387 
388 /*
389  * We'd like to detect any IPIs queued in the tiny window
390  * above and request an software interrupt to service them
391  * when we ERET.
392  *
393  * Computing the offset into the IPIQ array of the executing
394  * TC's IPI queue in-line would be tedious.  We use part of
395  * the TCContext register to hold 16 bits of offset that we
396  * can add in-line to find the queue head.
397  */
398 		mfc0	v0, CP0_TCCONTEXT
399 		la	a2, IPIQ
400 		srl	v0, v0, 16
401 		addu	a2, a2, v0
402 		LONG_L	v0, 0(a2)
403 		beqz	v0, 0f
404 /*
405  * If we have a queue, provoke dispatch within the VPE by setting C_SW1
406  */
407 		mfc0	v0, CP0_CAUSE
408 		ori	v0, v0, C_SW1
409 		mtc0	v0, CP0_CAUSE
410 0:
411 		/*
412 		 * This test should really never branch but
413 		 * let's be prudent here.  Having atomized
414 		 * the shared register modifications, we can
415 		 * now EVPE, and must do so before interrupts
416 		 * are potentially re-enabled.
417 		 */
418 		andi	a1, a1, MVPCONTROL_EVP
419 		beqz	a1, 1f
420 		evpe
421 1:
422 		/* We know that TCStatus.IXMT should be set from above */
423 		xori	a0, a0, TCSTATUS_IXMT
424 		or	a0, a0, v1
425 		mtc0	a0, CP0_TCSTATUS
426 		_ehb
427 
428 		.set	mips0
429 #endif /* CONFIG_MIPS_MT_SMTC */
430 		LONG_L	v1, PT_EPC(sp)
431 		MTC0	v1, CP0_EPC
432 		LONG_L	$31, PT_R31(sp)
433 		LONG_L	$28, PT_R28(sp)
434 		LONG_L	$25, PT_R25(sp)
435 #ifdef CONFIG_64BIT
436 		LONG_L	$8, PT_R8(sp)
437 		LONG_L	$9, PT_R9(sp)
438 #endif
439 		LONG_L	$7,  PT_R7(sp)
440 		LONG_L	$6,  PT_R6(sp)
441 		LONG_L	$5,  PT_R5(sp)
442 		LONG_L	$4,  PT_R4(sp)
443 		LONG_L	$3,  PT_R3(sp)
444 		LONG_L	$2,  PT_R2(sp)
445 		.set	pop
446 		.endm
447 
		/*
		 * RESTORE_SP_AND_RET (generic): pop the stack pointer and
		 * eret back to the EPC that RESTORE_SOME loaded into CP0.
		 */
448 		.macro	RESTORE_SP_AND_RET
449 		LONG_L	sp, PT_R29(sp)
450 		.set	mips3
451 		eret
452 		.set	mips0
453 		.endm
454 
455 #endif
456 
		/* RESTORE_SP: pop the pre-exception stack pointer, no return */
457 		.macro	RESTORE_SP
458 		LONG_L	sp, PT_R29(sp)
459 		.endm
460 
		/* RESTORE_ALL: inverse of SAVE_ALL, without returning */
461 		.macro	RESTORE_ALL
462 		RESTORE_TEMP
463 		RESTORE_STATIC
464 		RESTORE_AT
465 		RESTORE_SOME
466 		RESTORE_SP
467 		.endm
468 
		/* RESTORE_ALL_AND_RET: inverse of SAVE_ALL, then exception return */
469 		.macro	RESTORE_ALL_AND_RET
470 		RESTORE_TEMP
471 		RESTORE_STATIC
472 		RESTORE_AT
473 		RESTORE_SOME
474 		RESTORE_SP_AND_RET
475 		.endm
476 
477 /*
478  * Move to kernel mode and disable interrupts.
479  * Set cp0 enable bit as sign that we're running on the kernel stack
480  */
481 		.macro	CLI
482 #if !defined(CONFIG_MIPS_MT_SMTC)
		/*
		 * Set CU0, then clear every STATMASK bit (kernel mode,
		 * interrupts off) with a single or-then-xor pass.
		 */
483 		mfc0	t0, CP0_STATUS
484 		li	t1, ST0_CU0 | STATMASK
485 		or	t0, t1
486 		xori	t0, STATMASK
487 		mtc0	t0, CP0_STATUS
488 #else /* CONFIG_MIPS_MT_SMTC */
489 		/*
490 		 * For SMTC, we need to set privilege
491 		 * and disable interrupts only for the
492 		 * current TC, using the TCStatus register.
493 		 */
494 		mfc0	t0, CP0_TCSTATUS
495 		/* Fortunately CU 0 is in the same place in both registers */
496 		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
497 		li	t1, ST0_CU0 | 0x08001c00
498 		or	t0, t1
499 		/* Clear TKSU, leave IXMT */
500 		xori	t0, 0x00001800
501 		mtc0	t0, CP0_TCSTATUS
502 		_ehb
503 		/* We need to leave the global IE bit set, but clear EXL...*/
504 		mfc0	t0, CP0_STATUS
505 		ori	t0, ST0_EXL | ST0_ERL
506 		xori	t0, ST0_EXL | ST0_ERL
507 		mtc0	t0, CP0_STATUS
508 #endif /* CONFIG_MIPS_MT_SMTC */
509 		irq_disable_hazard
510 		.endm
511 
512 /*
513  * Move to kernel mode and enable interrupts.
514  * Set cp0 enable bit as sign that we're running on the kernel stack
515  */
516 		.macro	STI
517 #if !defined(CONFIG_MIPS_MT_SMTC)
		/*
		 * Set CU0 and IE, clear the remaining STATMASK bits:
		 * xoring with STATMASK & ~1 leaves bit 0 (IE) set.
		 */
518 		mfc0	t0, CP0_STATUS
519 		li	t1, ST0_CU0 | STATMASK
520 		or	t0, t1
521 		xori	t0, STATMASK & ~1
522 		mtc0	t0, CP0_STATUS
523 #else /* CONFIG_MIPS_MT_SMTC */
524 		/*
525 		 * For SMTC, we need to set privilege
526 		 * and enable interrupts only for the
527 		 * current TC, using the TCStatus register.
528 		 */
529 		_ehb
530 		mfc0	t0, CP0_TCSTATUS
531 		/* Fortunately CU 0 is in the same place in both registers */
532 		/* Set TCU0, TKSU (for later inversion) and IXMT */
533 		li	t1, ST0_CU0 | 0x08001c00
534 		or	t0, t1
535 		/* Clear TKSU *and* IXMT */
536 		xori	t0, 0x00001c00
537 		mtc0	t0, CP0_TCSTATUS
538 		_ehb
539 		/* We need to leave the global IE bit set, but clear EXL...*/
540 		mfc0	t0, CP0_STATUS
541 		ori	t0, ST0_EXL
542 		xori	t0, ST0_EXL
543 		mtc0	t0, CP0_STATUS
544 		/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
545 #endif /* CONFIG_MIPS_MT_SMTC */
546 		irq_enable_hazard
547 		.endm
548 
549 /*
550  * Just move to kernel mode and leave interrupts as they are.  Note
551  * for the R3000 this means copying the previous enable from IEp.
552  * Set cp0 enable bit as sign that we're running on the kernel stack
553  */
554 		.macro	KMODE
555 #ifdef CONFIG_MIPS_MT_SMTC
556 		/*
557 		 * This gets baroque in SMTC.  We want to
558 		 * protect the non-atomic clearing of EXL
559 		 * with DMT/EMT, but we don't want to take
560 		 * an interrupt while DMT is still in effect.
561 		 */
562 
563 		/* KMODE gets invoked from both reorder and noreorder code */
564 		.set	push
565 		.set	mips32r2
566 		.set	noreorder
		/* remember the old IXMT value in v1, then inhibit interrupts */
567 		mfc0	v0, CP0_TCSTATUS
568 		andi	v1, v0, TCSTATUS_IXMT
569 		ori	v0, TCSTATUS_IXMT
570 		mtc0	v0, CP0_TCSTATUS
571 		_ehb
572 		DMT	2				# dmt	v0
573 		/*
574 		 * We don't know a priori if ra is "live"
575 		 */
576 		move	t0, ra
577 		jal	mips_ihb
578 		nop	/* delay slot */
579 		move	ra, t0
580 #endif /* CONFIG_MIPS_MT_SMTC */
581 		mfc0	t0, CP0_STATUS
		/* STATMASK & ~1 keeps bit 0 (IE) out of the clear mask */
582 		li	t1, ST0_CU0 | (STATMASK & ~1)
583 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		/* R3000: copy the previous enable, IEp, down into IEc */
584 		andi	t2, t0, ST0_IEP
585 		srl	t2, 2
586 		or	t0, t2
587 #endif
588 		or	t0, t1
589 		xori	t0, STATMASK & ~1
590 		mtc0	t0, CP0_STATUS
591 #ifdef CONFIG_MIPS_MT_SMTC
592 		_ehb
		/* re-enable multithreading only if it was enabled before DMT */
593 		andi	v0, v0, VPECONTROL_TE
594 		beqz	v0, 2f
595 		nop	/* delay slot */
596 		emt
597 2:
598 		mfc0	v0, CP0_TCSTATUS
599 		/* Clear IXMT, then OR in previous value */
600 		ori	v0, TCSTATUS_IXMT
601 		xori	v0, TCSTATUS_IXMT
602 		or	v0, v1, v0
603 		mtc0	v0, CP0_TCSTATUS
604 		/*
605 		 * irq_disable_hazard below should expand to EHB
606 		 * on 24K/34K CPUS
607 		 */
608 		.set pop
609 #endif /* CONFIG_MIPS_MT_SMTC */
610 		irq_disable_hazard
611 		.endm
612 
613 #endif /* _ASM_STACKFRAME_H */
614