/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>
#include <asm/fpu.h>

struct task_struct;

/**
 * resume - resume execution of a task
 * @prev:	The task previously executed.
 * @next:	The task to begin executing.
 * @next_ti:	task_thread_info(next).
 *
 * This function is used whilst scheduling to save the context of prev & load
 * the context of next. Returns prev.
 */
extern asmlinkage struct task_struct *resume(struct task_struct *prev,
		struct task_struct *next, struct thread_info *next_ti);
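
/*
 * resume() itself lives in platform assembly (e.g. r4k_switch.S). The
 * property worth calling out is its return value: the call is made on
 * prev's kernel stack but returns on next's, so the returned pointer is
 * whatever task the *new* stack last switched away from. A sketch of the
 * caller's view:
 *
 *	last = resume(prev, next, task_thread_info(next));
 *	// here "current" is next; "last" names the task we came from
 */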

extern unsigned int ll_bit;
extern struct task_struct *ll_task;
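
/*
 * ll_bit/ll_task back the kernel's software emulation of ll/sc on cores
 * without native llsc support (see simulate_ll()/simulate_sc() in
 * arch/mips/kernel/traps.c). A minimal sketch of the invariant the
 * emulation maintains:
 *
 *	ll:	ll_bit = 1; ll_task = current;
 *	sc:	store succeeds iff (ll_bit && ll_task == current)
 *
 * __clear_software_ll_bit() below clears ll_bit on every context switch,
 * which is what forces a competing sc to fail once another task has run.
 */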

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management. We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_mask.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);	\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_mask = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif
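
/*
 * Note that __mips_mt_fpaff_switch_to() deliberately references "next"
 * without taking it as a parameter: it is only ever expanded inside
 * switch_to() below, where next is in scope. The lifecycle it undoes,
 * roughly: heavy FP use gets a task's cpus_mask narrowed to FPU-equipped
 * VPEs (TIF_FPUBOUND set); after a time-slice with CU1 left clear, the
 * user-requested mask is restored from thread.user_cpus_allowed.
 */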

/*
 * Clear LLBit during context switches on MIPSr5+ such that eretnc can be used
 * unconditionally when returning to userland in entry.S.
 */
#define __clear_r5_hw_ll_bit() do {					\
	if (cpu_has_mips_r5 || cpu_has_mips_r6)			\
		write_c0_lladdr(0);					\
} while (0)

#define __clear_software_ll_bit() do {					\
	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
		ll_bit = 0;						\
} while (0)

/*
 * Check FCSR for any unmasked exceptions left pending, as may have been
 * set with `ptrace'; clear them and send a signal.
 */
#ifdef CONFIG_MIPS_FP_SUPPORT
# define __sanitize_fcr31(next)						\
do {									\
	unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31);	\
	void __user *pc;						\
									\
	if (unlikely(fcr31)) {						\
		pc = (void __user *)task_pt_regs(next)->cp0_epc;	\
		next->thread.fpu.fcr31 &= ~fcr31;			\
		force_fcr31_sig(fcr31, pc, next);			\
	}								\
} while (0)
#else
# define __sanitize_fcr31(next) do { (void) (next); } while (0)
#endif

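/*
 * For reference, mask_fcr31_x() (asm/fpu.h) extracts the FCSR Cause bits
 * that would actually fire: the always-fatal Unimplemented Operation bit
 * plus any Cause bit whose corresponding Enable bit is set. A sketch of
 * the logic, given that Cause bits sit 5 above their Enable bits:
 *
 *	fcr31 & (FPU_CSR_UNI_X | ((fcr31 & FPU_CSR_ALL_E) << 5))
 */
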
/*
 * For newly created kernel threads switch_to() will return to
 * ret_from_kernel_thread; for newly created user threads it returns to
 * ret_from_fork. That is, everything following resume() will be skipped
 * for new threads, so everything that matters to new threads should be
 * placed before resume().
 */
#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	lose_fpu_inatomic(1, prev);					\
	if (tsk_used_math(next))					\
		__sanitize_fcr31(next);					\
	if (cpu_has_dsp) {						\
		__save_dsp(prev);					\
		__restore_dsp(next);					\
	}								\
	if (cop2_present) {						\
		u32 status = read_c0_status();				\
									\
		set_c0_status(ST0_CU2);					\
		if ((KSTK_STATUS(prev) & ST0_CU2)) {			\
			if (cop2_lazy_restore)				\
				KSTK_STATUS(prev) &= ~ST0_CU2;		\
			cop2_save(prev);				\
		}							\
		if (KSTK_STATUS(next) & ST0_CU2 &&			\
		    !cop2_lazy_restore) {				\
			cop2_restore(next);				\
		}							\
		write_c0_status(status);				\
	}								\
	__clear_r5_hw_ll_bit();						\
	__clear_software_ll_bit();					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(task_thread_info(next)->tp_value);	\
	__restore_watch(next);						\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)
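
/*
 * A minimal sketch of the call site in the core scheduler
 * (kernel/sched/core.c's context_switch(), simplified):
 *
 *	switch_to(prev, next, prev);
 *	barrier();
 *	return finish_task_switch(prev);
 *
 * The "last" output is needed because by the time switch_to() returns,
 * this code is executing on next's kernel stack, whose local prev would
 * otherwise name a stale task.
 */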

#endif /* _ASM_SWITCH_TO_H */