xref: /openbmc/linux/arch/mips/kernel/process.c (revision 97da55fc)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (i.e. sit in a loop waiting for somebody
 * to say that they'd like to reschedule).
 */
void __noreturn cpu_idle(void)
{
	int cpu;

	/* CPU is going idle. */
	cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched() && cpu_online(cpu)) {
#ifdef CONFIG_MIPS_MT_SMTC
			extern void smtc_idle_loop_hook(void);

			smtc_idle_loop_hook();
#endif

			if (cpu_wait) {
				/* Don't trace irqs off for idle */
				stop_critical_timings();
				(*cpu_wait)();
				start_critical_timings();
			}
		}
#ifdef CONFIG_HOTPLUG_CPU
		if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
			play_dead();
#endif
		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

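/*
 * Set up the user register state for a freshly exec'd thread: drop the
 * kernel and coprocessor privileges in the saved Status register, reset
 * the FPU/DSP state and point the EPC and stack pointer at the new
 * program.
 */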
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
#ifdef CONFIG_64BIT
	status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
#endif
	status |= KU_USER;
	regs->cp0_status = status;
	clear_used_math();
	clear_fpu_owner();
	if (cpu_has_dsp)
		__init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

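/*
 * Copy the architecture-specific thread state for a new task: save the
 * parent's FPU/DSP context into the child, build a pt_regs frame at the
 * top of the child's kernel stack and arrange for the child to resume
 * in ret_from_kernel_thread() (kernel threads) or ret_from_fork()
 * (user threads).
 */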
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;
	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	preempt_disable();

	if (is_fpu_owner())
		save_fp(p);

	if (cpu_has_dsp)
		save_dsp(p);

	preempt_enable();

	/* Set up the child's kernel stack frame (pt_regs at the top). */
	childregs = (struct pt_regs *) childksp - 1;
	/*  Put the stack after the struct pt_regs.  */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}
	*childregs = *regs;
	childregs->regs[7] = 0; /* Clear error flag */
	childregs->regs[2] = 0; /* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC restores TCStatus after Status, and the CU bits
	 * are aliased there.
	 */
	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
	clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}

/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
	memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

	return 1;
}

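/*
 * Fill in the general purpose register slots of an ELF register set
 * (elf_greg_t array) from a pt_regs frame, zeroing the slots that have
 * no meaningful saved value.
 */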
void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < EF_R0; i++)
		gp[i] = 0;
	gp[EF_R0] = 0;
	for (i = 1; i <= 31; i++)
		gp[EF_R0 + i] = regs->regs[i];
	gp[EF_R26] = 0;
	gp[EF_R27] = 0;
	gp[EF_LO] = regs->lo;
	gp[EF_HI] = regs->hi;
	gp[EF_CP0_EPC] = regs->cp0_epc;
	gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	gp[EF_CP0_STATUS] = regs->cp0_status;
	gp[EF_CP0_CAUSE] = regs->cp0_cause;
#ifdef EF_UNUSED0
	gp[EF_UNUSED0] = 0;
#endif
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	elf_dump_regs(*regs, task_pt_regs(tsk));
	return 1;
}

int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
	memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));

	return 1;
}

/*
 * Per-function stack frame layout, as recovered by scanning the
 * function prologue; used by the kernel stack unwinder below.
 */
struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

static inline int is_ra_save_ins(union mips_instruction *ip)
{
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31;
}

static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
{
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
	return 0;
}

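/*
 * Scan the first instructions of a function for the stack allocation
 * and the $ra save.  Returns 0 if both were found (ordinary nested
 * function), 1 if the function appears to be a leaf, and -1 if the
 * prologue could not be analysed.
 */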
static int get_frame_info(struct mips_frame_info *info)
{
	union mips_instruction *ip = info->func;
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;

	info->pc_offset = -1;
	info->frame_size = 0;

	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

	for (i = 0; i < max_insns; i++, ip++) {

		if (is_jal_jalr_jr_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip))
				info->frame_size = - ip->i_format.simmediate;
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

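/*
 * Pre-compute the frame layout of schedule() at boot so that
 * thread_saved_pc() and get_wchan() can unwind out of it for
 * blocked tasks.
 */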
static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;

	kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
#endif
	schedule_mfi.func = schedule;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
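/*
 * Given the current pc/sp/ra, recover the caller's pc within the
 * supplied kernel stack page, updating *sp and *ra along the way;
 * returns 0 once no further frame can be recovered.
 */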
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (__kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can
		 * wrongly consider a nested function to be a leaf
		 * one. In those cases, avoid always returning the
		 * same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
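/*
 * Report where a blocked task is sleeping: start from its saved PC and
 * keep unwinding while it still points into the scheduler.
 */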
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
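/*
 * Additionally, randomize the stack pointer by up to one page for
 * tasks that have not disabled address space randomization.
 */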
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}