/*
 * arch/sh/kernel/process.c
 *
 * This file handles the architecture-dependent parts of process handling..
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 *                 Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
 *                 Copyright (C) 2002 - 2008 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/kallsyms.h>
#include <linux/kexec.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/reboot.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/preempt.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/ubc.h>
#include <asm/fpu.h>
#include <asm/syscalls.h>
#include <asm/watchdog.h>

int ubc_usercnt = 0;

#ifdef CONFIG_32BIT
static void watchdog_trigger_immediate(void)
{
	sh_wdt_write_cnt(0xFF);
	sh_wdt_write_csr(0xC2);
}

void machine_restart(char * __unused)
{
	local_irq_disable();

	/* Use watchdog timer to trigger reset */
	watchdog_trigger_immediate();

	while (1)
		cpu_sleep();
}
#else
void machine_restart(char * __unused)
{
	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
	asm volatile("ldc %0, sr\n\t"
		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}
#endif

void machine_halt(void)
{
	local_irq_disable();

	while (1)
		cpu_sleep();
}

void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
}

void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Pid : %d, Comm: \t\t%s\n", task_pid_nr(current), current->comm);
	printk("CPU : %d \t\t%s (%s %.*s)\n\n",
	       smp_processor_id(), print_tainted(), init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);

	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("PR is at %s\n", regs->pr);

	printk("PC : %08lx SP : %08lx SR : %08lx ",
	       regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
	printk("TEA : %08x\n", ctrl_inl(MMU_TEA));
#else
	printk("\n");
#endif

	printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
	       regs->regs[0], regs->regs[1],
	       regs->regs[2], regs->regs[3]);
	printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
	       regs->regs[4], regs->regs[5],
	       regs->regs[6], regs->regs[7]);
	printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n",
	       regs->regs[8], regs->regs[9],
	       regs->regs[10], regs->regs[11]);
	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
	       regs->regs[12], regs->regs[13],
	       regs->regs[14]);
	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
	       regs->mach, regs->macl, regs->gbr, regs->pr);

	show_trace(NULL, (unsigned long *)regs->regs[15], regs);
	show_code(regs);
}

/*
 * Create a kernel thread
 */
ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
{
	do_exit(fn(arg));
}

/* Don't use this in BL=1(cli). Or else, CPU resets! */
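/*
 * The child's register frame is faked up by hand: the argument goes in r4,
 * the function pointer in r5, and the saved PC points at
 * kernel_thread_helper(), which calls fn(arg) and exits with its return
 * value.  CLONE_VM | CLONE_UNTRACED is always ORed into the caller's flags.
 */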
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	struct pt_regs regs;
	int pid;

	memset(&regs, 0, sizeof(regs));
	regs.regs[4] = (unsigned long)arg;
	regs.regs[5] = (unsigned long)fn;

	regs.pc = (unsigned long)kernel_thread_helper;
	regs.sr = (1 << 30);

	/* Ok, create the new process.. */
	pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
		      &regs, 0, NULL, NULL);

	return pid;
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	if (current->thread.ubc_pc) {
		current->thread.ubc_pc = 0;
		ubc_usercnt -= 1;
	}
}

void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	/* Forget lazy FPU state */
	clear_fpu(tsk, task_pt_regs(tsk));
	clear_used_math();
#endif
}

void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL, 0,
				      sizeof(struct user_fpu_struct),
				      fpu, NULL);
#endif

	return fpvalid;
}

asmlinkage void ret_from_fork(void);

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP)
	struct task_struct *tsk = current;
#endif

#if defined(CONFIG_SH_FPU)
	unlazy_fpu(tsk, regs);
	p->thread.fpu = tsk->thread.fpu;
	copy_to_stopped_child_used_math(p);
#endif

#if defined(CONFIG_SH_DSP)
	if (is_dsp_enabled(tsk)) {
		/* We can use the __save_dsp or just copy the struct:
		 * __save_dsp(p);
		 * p->thread.dsp_status.status |= SR_DSP
		 */
		p->thread.dsp_status = tsk->thread.dsp_status;
	}
#endif

	childregs = task_pt_regs(p);
	*childregs = *regs;

	if (user_mode(regs)) {
		childregs->regs[15] = usp;
		ti->addr_limit = USER_DS;
	} else {
		childregs->regs[15] = (unsigned long)childregs;
		ti->addr_limit = KERNEL_DS;
	}

	if (clone_flags & CLONE_SETTLS)
		childregs->gbr = childregs->regs[0];

	childregs->regs[0] = 0; /* Set return value for child */

	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	p->thread.ubc_pc = 0;

	return 0;
}

/* Tracing by user break controller. */
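/*
 * Program the UBC to raise a break on an instruction fetch from `pc' in
 * address space `asid' (the channel A registers on classic parts, the
 * CBR0/CAR0/CRR0 set on SH-4A).  thread.ubc_pc is set up elsewhere
 * (typically by the ptrace single-step code) and is cleared again in
 * break_point_trap() once the breakpoint has fired.  On SH-4A the last
 * register written is read back so the update is known to have landed.
 */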
static void ubc_set_tracing(int asid, unsigned long pc)
{
#if defined(CONFIG_CPU_SH4A)
	unsigned long val;

	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));

	ctrl_outl(val, UBC_CBR0);
	ctrl_outl(pc, UBC_CAR0);
	ctrl_outl(0x0, UBC_CAMR0);
	ctrl_outl(0x0, UBC_CBCR);

	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
	ctrl_outl(val, UBC_CRR0);

	/* Read UBC register that we wrote last, for checking update */
	val = ctrl_inl(UBC_CRR0);

#else /* CONFIG_CPU_SH4A */
	ctrl_outl(pc, UBC_BARA);

#ifdef CONFIG_MMU
	ctrl_outb(asid, UBC_BASRA);
#endif

	ctrl_outl(0, UBC_BAMRA);

	if (current_cpu_data.type == CPU_SH7729 ||
	    current_cpu_data.type == CPU_SH7710 ||
	    current_cpu_data.type == CPU_SH7712 ||
	    current_cpu_data.type == CPU_SH7203) {
		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
	} else {
		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
		ctrl_outw(BRCR_PCBA, UBC_BRCR);
	}
#endif /* CONFIG_CPU_SH4A */
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev, struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
	unlazy_fpu(prev, task_pt_regs(prev));
#endif

#ifdef CONFIG_MMU
	/*
	 * Restore the kernel mode register
	 * k7 (r7_bank1)
	 */
	asm volatile("ldc %0, r7_bank"
		     : /* no output */
		     : "r" (task_thread_info(next)));
#endif

	if (ubc_usercnt == 0)
		/* If no tasks are using the UBC, we're done */;
	else if (next->thread.ubc_pc && next->mm) {
		int asid = 0;
#ifdef CONFIG_MMU
		asid |= cpu_asid(smp_processor_id(), next->mm);
#endif
		ubc_set_tracing(asid, next->thread.ubc_pc);
	} else {
#if defined(CONFIG_CPU_SH4A)
		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
#else
		ctrl_outw(0, UBC_BBRA);
		ctrl_outw(0, UBC_BBRB);
#endif
	}

	return prev;
}

asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs __regs)
{
#ifdef CONFIG_MMU
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

	return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
#else
	/* fork almost works, enough to trick you into looking elsewhere :-( */
	return -EINVAL;
#endif
}

asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 unsigned long parent_tidptr,
			 unsigned long child_tidptr,
			 struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

	if (!newsp)
		newsp = regs->regs[15];

	return do_fork(clone_flags, newsp, regs, 0,
		       (int __user *)parent_tidptr,
		       (int __user *)child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
		       0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
			  char __user * __user *uenvp, unsigned long r7,
			  struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	int error;
	char *filename;

	filename = getname(ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename, uargv, uenvp, regs);
	putname(filename);
out:
	return error;
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(p);

#ifdef CONFIG_FRAME_POINTER
	if (in_sched_functions(pc)) {
		unsigned long schedule_frame = (unsigned long)p->thread.sp;
		return ((unsigned long *)schedule_frame)[21];
	}
#endif

	return pc;
}

asmlinkage void break_point_trap(void)
{
	/* Clear tracing. */
#if defined(CONFIG_CPU_SH4A)
	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
#else
	ctrl_outw(0, UBC_BBRA);
	ctrl_outw(0, UBC_BBRB);
	ctrl_outl(0, UBC_BRCR);
#endif
	current->thread.ubc_pc = 0;
	ubc_usercnt -= 1;

	force_sig(SIGTRAP, current);
}