/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (i.e. sit in a loop waiting for somebody
 * to say that they'd like to reschedule).
 */
void __noreturn cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
			extern void smtc_idle_loop_hook(void);

			smtc_idle_loop_hook();
#endif
			if (cpu_wait)
				(*cpu_wait)();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

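/*
 * Assembly return path for newly created children: copy_thread() below
 * installs it as the child's saved $ra, so the first context switch
 * into the child resumes here.
 */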
asmlinkage void ret_from_fork(void);

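/*
 * Set up the user-mode context of a freshly exec'ed thread: drop kernel
 * and coprocessor privileges, reset FPU/DSP state, and point the
 * exception return address and stack pointer at the new image.
 */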
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|KU_MASK);
#ifdef CONFIG_64BIT
	status &= ~ST0_FR;
	status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
#endif
	status |= KU_USER;
	regs->cp0_status = status;
	clear_used_math();
	clear_fpu_owner();
	if (cpu_has_dsp)
		__init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
	current_thread_info()->addr_limit = USER_DS;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;
	long childksp;
	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

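	/*
	 * If the parent currently owns the FPU (or has DSP state), flush
	 * those live register contents into the child's thread struct so
	 * the child starts from an up-to-date copy.
	 */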
	preempt_disable();

	if (is_fpu_owner())
		save_fp(p);

	if (cpu_has_dsp)
		save_dsp(p);

	preempt_enable();

	/* Set up the child's register frame at the top of its kernel stack. */
	childregs = (struct pt_regs *) childksp - 1;
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */

#if defined(CONFIG_BINFMT_IRIX)
	if (current->personality != PER_LINUX) {
		/* Under IRIX things are a little different. */
		childregs->regs[3] = 1;
		regs->regs[3] = 0;
	}
#endif
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	regs->regs[2] = p->pid;

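	/*
	 * Kernel threads (CU0 set in cp0_status) keep running on the
	 * child's kernel stack with a KERNEL_DS address limit; user
	 * children resume on the caller-supplied user stack pointer.
	 */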
	if (childregs->cp0_status & ST0_CU0) {
		childregs->regs[28] = (unsigned long) ti;
		childregs->regs[29] = childksp;
		ti->addr_limit = KERNEL_DS;
	} else {
		childregs->regs[29] = usp;
		ti->addr_limit = USER_DS;
	}
	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
	clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
	/*
	 * FPU affinity support is cleaner if we track the
	 * user-visible CPU affinity from the very beginning.
	 * The generic cpus_allowed mask will already have
	 * been copied from the parent before copy_thread
	 * is invoked.
	 */
	p->thread.user_cpus_allowed = p->cpus_allowed;
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}

/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
	memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

	return 1;
}

void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < EF_R0; i++)
		gp[i] = 0;
	gp[EF_R0] = 0;
	for (i = 1; i <= 31; i++)
		gp[EF_R0 + i] = regs->regs[i];
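	/* $26/$27 (k0/k1) are kernel scratch registers; never expose them. */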
	gp[EF_R26] = 0;
	gp[EF_R27] = 0;
	gp[EF_LO] = regs->lo;
	gp[EF_HI] = regs->hi;
	gp[EF_CP0_EPC] = regs->cp0_epc;
	gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	gp[EF_CP0_STATUS] = regs->cp0_status;
	gp[EF_CP0_CAUSE] = regs->cp0_cause;
#ifdef EF_UNUSED0
	gp[EF_UNUSED0] = 0;
#endif
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	elf_dump_regs(*regs, task_pt_regs(tsk));
	return 1;
}

int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
	memcpy(fpr, &t->thread.fpu, sizeof(t->thread.fpu));

	return 1;
}

/*
 * Create a kernel thread
 */
static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
{
	do_exit(fn(arg));
}

long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

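	/*
	 * kernel_thread_helper() is entered with arg in $a0 (regs[4]) and
	 * fn in $a1 (regs[5]), per the MIPS calling convention.
	 */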
	regs.regs[4] = (unsigned long) arg;
	regs.regs[5] = (unsigned long) fn;
	regs.cp0_epc = (unsigned long) kernel_thread_helper;
	regs.cp0_status = read_c0_status();
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			  ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2);
#else
	regs.cp0_status |= ST0_EXL;
#endif

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}

/*
 * Per-function frame layout used by the stack unwinder; filled in by
 * get_frame_info() from a function's prologue.
 */
struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

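/*
 * The helpers below recognize the prologue instructions the unwinder
 * cares about. A typical non-leaf prologue looks like:
 *
 *	addiu	sp,sp,-N	# matched by is_sp_move_ins()
 *	sw	ra,M(sp)	# matched by is_ra_save_ins()
 */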
static inline int is_ra_save_ins(union mips_instruction *ip)
{
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31;
}

static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
{
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
	return 0;
}

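/*
 * Scan at most the first 128 instructions of a function for the stack
 * adjustment and the $ra save, stopping at the first jump or call,
 * which ends the prologue as far as the unwinder is concerned.
 */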
static int get_frame_info(struct mips_frame_info *info)
{
	union mips_instruction *ip = info->func;
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;

	info->pc_offset = -1;
	info->frame_size = 0;

	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

	for (i = 0; i < max_insns; i++, ip++) {

		if (is_jal_jalr_jr_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip))
				info->frame_size = - ip->i_format.simmediate;
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;

	kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
#endif
	schedule_mfi.func = schedule;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

352  * Return saved PC of a blocked thread.
353  */
354 unsigned long thread_saved_pc(struct task_struct *tsk)
355 {
356 	struct thread_struct *t = &tsk->thread;
357 
	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}

#ifdef CONFIG_KALLSYMS
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page;
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	stack_page = (unsigned long)task_stack_page(task);
	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (__kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

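	/*
	 * Only the code that has executed so far can have set up the frame:
	 * analyze the function from its start up to pc.
	 */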
	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In that
		 * case, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
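	/* Shift sp down by a random sub-page offset, then apply the ABI mask. */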
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}