/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>

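/*
 * Entry points defined in assembly (entry_32.S in this tree); a new task
 * begins executing at one of these via the saved thread.ip that is
 * consumed by switch_to().
 */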
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");

/*
 * Return the saved PC of a blocked thread.
 *
 * The offset of three words is tied to the stack frame that switch_to()
 * leaves behind at thread.sp; if that layout changes, this must change too.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.sp)[3];
}

void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

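	/*
	 * For a user-mode trap, sp/ss/gs were pushed into pt_regs by the
	 * hardware and the entry code; for a kernel-mode trap they were
	 * not, so read the live values instead.
	 */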
	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		gs = get_user_gs(regs);
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			(u16)regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))
		return;

	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);
	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
			d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
	unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);
	struct task_struct *tsk;
	int err;

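	/*
	 * Rough layout of the child's kernel stack after the assignments
	 * below (stack grows down):
	 *
	 *   thread.sp0 = childregs + 1   <- top of stack, loaded into the
	 *                                   TSS esp0 on context switch
	 *   childregs  = task_pt_regs(p) <- child's user-mode register frame
	 *   thread.sp  = childregs       <- initial kernel stack pointer
	 */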
	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.ip = (unsigned long) ret_from_kernel_thread;
		task_user_gs(p) = __KERNEL_STACK_CANARY;
		childregs->ds = __USER_DS;
		childregs->es = __USER_DS;
		childregs->fs = __KERNEL_PERCPU;
		childregs->bx = sp;	/* function to call */
		childregs->bp = arg;	/* its argument */
		childregs->orig_ax = -1;
		childregs->cs = __KERNEL_CS | get_kernel_rpl();
		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
		p->thread.fpu_counter = 0;
		p->thread.io_bitmap_ptr = NULL;
		memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
		return 0;
	}
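	/*
	 * User thread: start from a copy of the parent's register frame.
	 * ax = 0 is what makes fork()/clone() return 0 in the child, and
	 * a non-NULL sp gives the child its own user stack (clone()).
	 */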
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	p->thread.ip = (unsigned long) ret_from_fork;
	task_user_gs(p) = get_user_gs(current_pt_regs());

	p->thread.fpu_counter = 0;
	p->thread.io_bitmap_ptr = NULL;
	tsk = current;
	err = -ENOMEM;

	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

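	/*
	 * If the parent uses ioperm(), the child needs its own copy of
	 * the I/O bitmap, which a later context switch will install.
	 */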
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

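/*
 * Called by the binfmt loaders at exec time: reset the segment registers
 * and flags and point the task at its new user-mode entry point and stack.
 */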
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	set_user_gs(regs, 0);
	regs->fs		= 0;
	regs->ds		= __USER_DS;
	regs->es		= __USER_DS;
	regs->ss		= __USER_DS;
	regs->cs		= __USER_CS;
	regs->ip		= new_ip;
	regs->sp		= new_sp;
	regs->flags		= X86_EFLAGS_IF;
	/*
	 * Force the task onto the iret return path by making it look as
	 * if there is work pending.
	 */
	set_thread_flag(TIF_NOTIFY_RESUME);
}
EXPORT_SYMBOL_GPL(start_thread);

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching as
 * slow is something of a red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually become a valid point.
 * More important, however, is the fact that this gives us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	fpu_switch_t fpu;

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

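	/*
	 * switch_fpu_prepare() decides whether the outgoing task's FPU
	 * state must be saved and whether the incoming task's state can
	 * be restored right away; the returned cookie is consumed by
	 * switch_fpu_finish() below.
	 */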
	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry.  No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel.  Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs.  This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	lazy_save_gs(prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed.  In normal use, the flags restore
	 * in the switch assembly will handle this.  But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * If it were not for PREEMPT_ACTIVE we could guarantee that the
	 * preempt_count of all tasks was equal here and this would not be
	 * needed.
	 */
	task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
	this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Restore %gs if needed (which is common).  The bitwise OR is
	 * intentional: the reload is needed whenever either the old or
	 * the new value is nonzero.
	 */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	switch_fpu_finish(next_p, fpu);

	this_cpu_write(current_task, next_p);

	return prev_p;
}

#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

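/*
 * Walk the frame-pointer chain of a sleeping task to find the first
 * return address outside the scheduler, i.e. where the task blocked.
 * Assumes the standard %ebp frame layout:
 *
 *	bp     -> saved caller %ebp	(follow with bp = *bp)
 *	bp + 4 -> return address	(candidate wchan)
 *
 * The walk is bounded (16 frames) and range-checked against the
 * task's stack page, since the chain may be stale or corrupt.
 */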
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* arch/x86/include/asm/switch_to.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);	/* return address */
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;	/* caller's frame */
	} while (count++ < 16);
	return 0;
}