/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.sp)[3];
}
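
/*
 * Note: the [3] above is not derived from any C structure; it assumes the
 * exact frame that the 32-bit switch_to() macro builds on the blocked
 * task's kernel stack (saved %ebp and flags below the scheduler frame's
 * stack slots).  It is a debugging heuristic tied to switch_to()'s push
 * sequence and to how the compiler lays out schedule()'s frame, and is
 * only meaningful for a task that is currently switched out.
 */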

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us.  CPU0 already has it initialized but no harm in
	 * doing it again.  This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (cpuidle_idle_call())
				pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
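
/*
 * A sketch of the idle-entry contract assumed above (the exact behaviour
 * is up to the cpuidle driver or the pm_idle implementation):
 *
 *	local_irq_disable();
 *	if (!need_resched())
 *		safe_halt();		(sti;hlt - a wakeup IRQ arriving
 *					 between the check and the halt
 *					 cannot be lost)
 *	else
 *		local_irq_enable();
 *
 * TS_POLLING, set on entry, tells the scheduler that this CPU polls
 * need_resched() while idle, so a resched IPI can often be skipped.
 */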

void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		gs = get_user_gs(regs);
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	show_regs_common();

	printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			(u16)regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
			d6, d7);
}
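
/*
 * read_cr4_safe() is used above because %cr4 does not exist on 386 and
 * early-486 class CPUs; the "safe" variant is expected to catch the
 * resulting fault and report 0 rather than oops while we are trying to
 * dump register state.
 */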

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
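
/*
 * unlazy_fpu() flushes any FPU state still live in the registers back
 * into tsk's in-memory state, so that the task-struct copy taken for the
 * child sees a coherent snapshot rather than stale saved state.
 */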

int copy_thread(unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	task_user_gs(p) = get_user_gs(regs);

	p->thread.io_bitmap_ptr = NULL;
	tsk = current;
	err = -ENOMEM;

	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
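
/*
 * The child kernel-stack layout that copy_thread() sets up, top down:
 *
 *	thread.sp0 = childregs + 1	top of stack; what TSS esp0 gets
 *	childregs (= task_pt_regs(p))	pt_regs copied from the parent,
 *					with ax = 0 so fork() returns 0
 *					in the child
 *	thread.sp  = childregs		saved kernel %esp; together with
 *					thread.ip = ret_from_fork this is
 *					where the first switch_to() into
 *					the child starts executing
 */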

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	set_user_gs(regs, 0);
	regs->fs		= 0;
	regs->ds		= __USER_DS;
	regs->es		= __USER_DS;
	regs->ss		= __USER_DS;
	regs->cs		= __USER_CS;
	regs->ip		= new_ip;
	regs->sp		= new_sp;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);
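
/*
 * start_thread() above is the exec-time register reset used by the binfmt
 * loaders: every segment is pointed at the flat user code/data descriptors,
 * and %fs and user %gs are cleared so that no TLS selector from the old
 * image can leak into the new one.
 */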


/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	bool preload_fpu;

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	/*
	 * If the task has used the FPU in the last 5 timeslices, just do a
	 * full restore of the math state immediately to avoid the trap; the
	 * chances of needing the FPU soon are obviously high now.
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->fpu.state);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry.  No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel.  Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs.  This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	lazy_save_gs(prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed.  In normal use, the flags restore
	 * in the switch assembly will handle this.  But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * If we're going to preload the FPU context, make sure clts
	 * is run while we're batching the CPU state updates.
	 */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	if (preload_fpu)
		__math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	percpu_write(current_task, next_p);

	return prev_p;
}
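
/*
 * Note that by the time the C code above runs, the low-level switch_to()
 * macro has already saved prev's %esp/%ebp/flags and loaded next's kernel
 * stack, so __switch_to() executes on next's stack; only the remaining
 * per-thread CPU state (FPU, TLS, %gs, IOPL, IO bitmap, debug registers)
 * is switched here.
 */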

#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* switch_to() in asm/system.h pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}
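
/*
 * The walk above relies on the conventional i386 stack-frame layout,
 *
 *	bp[1] (bp + 4):	return address in the caller
 *	bp[0] (bp + 0):	caller's saved %ebp, i.e. the next frame
 *
 * which presumes the scheduler path is built with frame pointers; the
 * 16-frame count limit bounds the walk if the %ebp chain is corrupt.
 */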