/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 *
 * Low-level MIPS return paths from exceptions, interrupts and syscalls.
 * With interrupts disabled, this code decides whether to resume user
 * space, run pending work (reschedule / signal delivery), preempt the
 * kernel (CONFIG_PREEMPT), or simply restore the saved register frame.
 *
 * Register conventions visible in this code:
 *   $28 (gp) - current thread_info pointer while in kernel mode
 *              (TI_FLAGS/TI_REGS/TI_PRE_COUNT are offsets into it)
 *   sp       - points at the saved struct pt_regs frame (PT_STATUS etc.)
 *   a2       - preloaded with thread_info->flags on the work-handling
 *              paths, as noted at each use site
 */
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif

/*
 * Without kernel preemption a "return to kernel mode" needs no resched
 * check, so resume_kernel collapses straight into restore_all.  With
 * preemption, ret_from_exception is just an IRQ-disable in front of
 * __ret_from_irq (aliased via the #define below).
 */
#ifndef CONFIG_PREEMPT
#define resume_kernel	restore_all
#else
#define __ret_from_irq	ret_from_exception
#endif

	.text
	.align	5
#ifndef CONFIG_PREEMPT
FEXPORT(ret_from_exception)
	local_irq_disable			# preempt stop
	b	__ret_from_irq
#endif
FEXPORT(ret_from_irq)
	# s0 holds the TI_REGS value stashed by the interrupt entry path;
	# put it back before falling into the common return code (the same
	# save/restore-around-a-call pattern appears in restore_all below).
	LONG_S	s0, TI_REGS($28)
FEXPORT(__ret_from_irq)
/*
 * We can be coming here from a syscall done in the kernel space,
 * e.g. a failed kernel_execve().
 */
resume_userspace_check:
	LONG_L	t0, PT_STATUS(sp)		# returning to kernel mode?
	andi	t0, t0, KU_USER			# KU bit of the saved Status
	beqz	t0, resume_kernel		# clear -> interrupted kernel

resume_userspace:
	local_irq_disable			# make sure we dont miss an
						# interrupt setting need_resched
						# between sampling and return
	LONG_L	a2, TI_FLAGS($28)		# current->work
	andi	t0, a2, _TIF_WORK_MASK		# (ignoring syscall_trace)
	bnez	t0, work_pending
	j	restore_all

#ifdef CONFIG_PREEMPT
resume_kernel:
	local_irq_disable
	lw	t0, TI_PRE_COUNT($28)		# preempt_count != 0 ->
	bnez	t0, restore_all			# preemption disabled
need_resched:
	LONG_L	t0, TI_FLAGS($28)
	andi	t1, t0, _TIF_NEED_RESCHED
	beqz	t1, restore_all
	LONG_L	t0, PT_STATUS(sp)		# Interrupts off?
	andi	t0, 1				# bit 0 = Status.IE of the
	beqz	t0, restore_all			# interrupted context
	jal	preempt_schedule_irq
	b	need_resched			# re-check; flag may be set again
#endif

FEXPORT(ret_from_kernel_thread)
	jal	schedule_tail			# a0 = struct task_struct *prev
	move	a0, s1				# s1 = thread function argument
	jal	s0				# s0 = thread function pointer
	j	syscall_exit			# thread returned: exit via
						# the syscall return path

FEXPORT(ret_from_fork)
	jal	schedule_tail			# a0 = struct task_struct *prev
						# then fall through to syscall_exit

FEXPORT(syscall_exit)
	local_irq_disable			# make sure need_resched and
						# signals dont change between
						# sampling and return
	LONG_L	a2, TI_FLAGS($28)		# current->work
	li	t0, _TIF_ALLWORK_MASK
	and	t0, a2, t0
	bnez	t0, syscall_exit_work

restore_all:					# restore full frame
#ifdef CONFIG_MIPS_MT_SMTC
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
	# Set IXMT so this TC can't take interrupts while we manipulate
	# the IM backstop state; v0 keeps the pre-existing IXMT bit so we
	# can restore it afterwards.
	mfc0	v0, CP0_TCSTATUS
	ori	v1, v0, TCSTATUS_IXMT
	mtc0	v1, CP0_TCSTATUS
	andi	v0, TCSTATUS_IXMT		# v0 = old IXMT bit only
	_ehb					# clear CP0 write hazard
	mfc0	t0, CP0_TCCONTEXT		# deferred IM bits live here
						# (presumably stashed by the
						# interrupt path - see the xor
						# /mtc0 pair below)
	DMT	9				# dmt t1
	jal	mips_ihb			# instruction hazard barrier
	mfc0	t2, CP0_STATUS
	andi	t3, t0, 0xff00			# t3 = deferred IM0..IM7 mask
	or	t2, t2, t3			# re-enable them in Status
	mtc0	t2, CP0_STATUS
	_ehb
	andi	t1, t1, VPECONTROL_TE		# were other TCs running before
	beqz	t1, 1f				# the DMT?  If so, re-enable
	EMT					# multi-threading
1:
	mfc0	v1, CP0_TCSTATUS
	/* We set IXMT above, XOR should clear it here */
	xori	v1, v1, TCSTATUS_IXMT
	or	v1, v0, v1			# but keep it if it was already
	mtc0	v1, CP0_TCSTATUS		# set on entry (v0)
	_ehb
	xor	t0, t0, t3			# clear the IM bits we re-armed
	mtc0	t0, CP0_TCCONTEXT		# from the deferred set
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
/* Detect and execute deferred IPI "interrupts" */
	# Swap TI_REGS to point at our frame around the call, then restore
	# the saved value (s0 is callee-saved across deferred_smtc_ipi).
	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	jal	deferred_smtc_ipi
	LONG_S	s0, TI_REGS($28)
#endif /* CONFIG_MIPS_MT_SMTC */
	.set	noat				# the RESTORE_* macros use $at
	RESTORE_TEMP
	RESTORE_AT
	RESTORE_STATIC
restore_partial:				# restore partial frame
#ifdef CONFIG_TRACE_IRQFLAGS
	# trace_hardirqs_{on,off} are ordinary C calls and clobber the
	# registers we just restored, so re-save them around the call.
	SAVE_STATIC
	SAVE_AT
	SAVE_TEMP
	LONG_L	v0, PT_STATUS(sp)		# will IRQs be on after eret?
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	v0, ST0_IEP			# R3000-style: previous IE bit
#else
	and	v0, ST0_IE
#endif
	beqz	v0, 1f
	jal	trace_hardirqs_on
	b	2f
1:	jal	trace_hardirqs_off
2:
	RESTORE_TEMP
	RESTORE_AT
	RESTORE_STATIC
#endif
	RESTORE_SOME
	RESTORE_SP_AND_RET			# restores sp last, then eret
	.set	at

work_pending:
	andi	t0, a2, _TIF_NEED_RESCHED	# a2 is preloaded with TI_FLAGS
	beqz	t0, work_notifysig
work_resched:
	jal	schedule

	local_irq_disable			# make sure need_resched and
						# signals dont change between
						# sampling and return
	LONG_L	a2, TI_FLAGS($28)
	andi	t0, a2, _TIF_WORK_MASK		# is there any work to be done
						# other than syscall tracing?
	beqz	t0, restore_all
	andi	t0, a2, _TIF_NEED_RESCHED
	bnez	t0, work_resched

work_notifysig:					# deal with pending signals and
						# notify-resume requests
	move	a0, sp				# a0 = struct pt_regs *
	li	a1, 0				# a1 = unused (no oldset)
	jal	do_notify_resume		# a2 already loaded
	j	resume_userspace_check		# re-test: do_notify_resume may
						# have changed the saved Status

FEXPORT(syscall_exit_partial)
	local_irq_disable			# make sure need_resched doesn't
						# change between and return
	LONG_L	a2, TI_FLAGS($28)		# current->work
	li	t0, _TIF_ALLWORK_MASK
	and	t0, a2
	beqz	t0, restore_partial		# fast path: no work pending
	SAVE_STATIC				# complete the frame before
						# entering the slow path
syscall_exit_work:
	LONG_L	t0, PT_STATUS(sp)		# returning to kernel mode?
	andi	t0, t0, KU_USER
	beqz	t0, resume_kernel
	li	t0, _TIF_WORK_SYSCALL_EXIT
	and	t0, a2				# a2 is preloaded with TI_FLAGS
	beqz	t0, work_pending		# trace bit set?
	local_irq_enable			# could let syscall_trace_leave()
						# call schedule() instead
	move	a0, sp				# a0 = struct pt_regs *
	jal	syscall_trace_leave
	b	resume_userspace

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)

/*
 * MIPS32R2 Instruction Hazard Barrier - must be called via a subroutine
 * so that the jr.hb both returns and clears the instruction hazard.
 *
 * For C code use the inline version named instruction_hazard().
 */
LEAF(mips_ihb)
	.set	mips32r2
	jr.hb	ra				# return + hazard barrier
	nop					# branch delay slot
	END(mips_ihb)

#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */