/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (i.e. sit in a loop waiting for somebody
 * to say that they'd like to reschedule).
 */
void __noreturn cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {
#ifdef CONFIG_MIPS_MT_SMTC
			extern void smtc_idle_loop_hook(void);

			smtc_idle_loop_hook();
#endif
			if (cpu_wait)
				(*cpu_wait)();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
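
/*
 * Illustrative sketch (hedged): on many CPUs, cpu_wait points at a tiny
 * routine that executes the MIPS WAIT instruction to halt the pipeline
 * until the next interrupt.  example_wait below is a hypothetical routine
 * modeled on the wait implementations in cpu-probe.c; it is not part of
 * this file.
 */
#if 0
static void example_wait(void)
{
	__asm__("	.set	mips3		\n"
		"	wait			\n"
		"	.set	mips0		\n");
}
#endif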

asmlinkage void ret_from_fork(void);

void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
#ifdef CONFIG_64BIT
	status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
#endif
	status |= KU_USER;
	regs->cp0_status = status;
	clear_used_math();
	clear_fpu_owner();
	if (cpu_has_dsp)
		__init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
	current_thread_info()->addr_limit = USER_DS;
}
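
/*
 * Illustrative sketch (hedged): start_thread() is typically invoked from
 * an exec path once the new image is mapped.  The call below is a
 * hypothetical call site modeled on the ELF loader in fs/binfmt_elf.c:
 */
#if 0
	start_thread(regs, elf_entry, bprm->p);
#endif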

void exit_thread(void)
{
}

void flush_thread(void)
{
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;
	long childksp;
	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	preempt_disable();

	if (is_fpu_owner())
		save_fp(p);

	if (cpu_has_dsp)
		save_dsp(p);

	preempt_enable();

	/* Set up the child's register frame on its kernel stack. */
	childregs = (struct pt_regs *) childksp - 1;
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */

	childregs->regs[2] = 0;	/* Child gets zero as return value */
	regs->regs[2] = p->pid;

	if (childregs->cp0_status & ST0_CU0) {
		childregs->regs[28] = (unsigned long) ti;
		childregs->regs[29] = childksp;
		ti->addr_limit = KERNEL_DS;
	} else {
		childregs->regs[29] = usp;
		ti->addr_limit = USER_DS;
	}
	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC restores TCStatus after Status, and the CU bits
	 * are aliased there.
	 */
	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
	clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}
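
/*
 * Illustrative sketch (hedged): the child kernel-stack layout built by
 * copy_thread() above.  The 32-byte pad below the stack top is the MIPS
 * calling convention's argument save area; addresses grow downward.
 *
 *	task_stack_page(p) + THREAD_SIZE	<- stack top
 *		32-byte argument save area
 *	childksp
 *		struct pt_regs (*childregs, copied from the parent)
 *		... free stack space ...
 *	task_stack_page(p)			<- struct thread_info lives here
 */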

/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
	memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

	return 1;
}

void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < EF_R0; i++)
		gp[i] = 0;
	gp[EF_R0] = 0;
	for (i = 1; i <= 31; i++)
		gp[EF_R0 + i] = regs->regs[i];
	gp[EF_R26] = 0;
	gp[EF_R27] = 0;
	gp[EF_LO] = regs->lo;
	gp[EF_HI] = regs->hi;
	gp[EF_CP0_EPC] = regs->cp0_epc;
	gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	gp[EF_CP0_STATUS] = regs->cp0_status;
	gp[EF_CP0_CAUSE] = regs->cp0_cause;
#ifdef EF_UNUSED0
	gp[EF_UNUSED0] = 0;
#endif
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	elf_dump_regs(*regs, task_pt_regs(tsk));
	return 1;
}

int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
	memcpy(fpr, &t->thread.fpu, sizeof(t->thread.fpu));

	return 1;
}

/*
 * Create a kernel thread.
 */
static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
{
	do_exit(fn(arg));
}

long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.regs[4] = (unsigned long) arg;
	regs.regs[5] = (unsigned long) fn;
	regs.cp0_epc = (unsigned long) kernel_thread_helper;
	regs.cp0_status = read_c0_status();
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			  ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2);
#else
	regs.cp0_status |= ST0_EXL;
#endif

	/* OK, create the new process. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
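
/*
 * Illustrative sketch (hedged): a minimal, hypothetical kernel_thread()
 * caller.  example_thread_fn and example_spawn are made-up names, and the
 * clone flags shown are just one plausible choice.
 */
#if 0
static int example_thread_fn(void *data)
{
	printk(KERN_INFO "example thread running, data=%p\n", data);
	return 0;	/* becomes the thread's exit code via do_exit() */
}

static void example_spawn(void)
{
	long pid = kernel_thread(example_thread_fn, NULL, CLONE_FS | CLONE_FILES);

	if (pid < 0)
		printk(KERN_ERR "kernel_thread failed: %ld\n", pid);
}
#endif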

/*
 * Per-function stack frame information used by the stack unwinder below.
 */
struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

static inline int is_ra_save_ins(union mips_instruction *ip)
{
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31;
}

static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
{
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
	return 0;
}
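
/*
 * Illustrative sketch (hedged): a typical function prologue that the
 * predicates above recognize (the frame size and offset are hypothetical):
 *
 *	addiu	sp, sp, -32	# matched by is_sp_move_ins()
 *	sw	ra, 28(sp)	# matched by is_ra_save_ins()
 *	...
 *	jal	some_callee	# is_jal_jalr_jr_ins() stops the scan
 *
 * get_frame_info() below scans for this pattern and returns 0 for a
 * non-leaf function, 1 for an apparent leaf function, and -1 on failure.
 */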

static int get_frame_info(struct mips_frame_info *info)
{
	union mips_instruction *ip = info->func;
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;

	info->pc_offset = -1;
	info->frame_size = 0;

	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

	for (i = 0; i < max_insns; i++, ip++) {

		if (is_jal_jalr_jr_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip))
				info->frame_size = - ip->i_format.simmediate;
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;

	kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
#endif
	schedule_mfi.func = schedule;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk(KERN_WARNING
		       "Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return the saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case. */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page;
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	stack_page = (unsigned long)task_stack_page(task);
	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return the saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (__kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction.
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one.  In such
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
#endif
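
/*
 * Illustrative sketch (hedged): how a caller such as show_backtrace()
 * might walk a stack with unwind_stack().  This is a simplified,
 * hypothetical loop, not the actual traps.c implementation:
 */
#if 0
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	while (pc) {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	}
#endif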

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}
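
/*
 * Illustrative sketch (hedged): a worked example of the alignment math
 * above, assuming 4 KB pages (so "& ~PAGE_MASK" keeps a 0-4095 byte
 * offset) and a 32-bit ALMASK of ~7UL.  All numbers are hypothetical:
 *
 *	sp            = 0x7fff8000
 *	random offset =      0x123
 *	sp - offset   = 0x7fff7edd
 *	(...) & ~7UL  = 0x7fff7ed8	(8-byte aligned)
 */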