xref: /openbmc/linux/arch/um/kernel/process.c (revision 275876e2)
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>

/*
 * This is a per-cpu array.  A processor only modifies and only reads
 * its own entry, so it's OK if another processor is concurrently
 * modifying its own entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal();
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(&current->thread.regs);
}

void exit_thread(void)
{
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs);
}
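
/*
 * A hedged aside, not part of the original file: new_thread() and
 * switch_threads() live in the os-Linux layer, so the jmp_buf stuffing
 * described above is not visible here.  The standalone userspace sketch
 * below (all names illustrative, not UML's) shows the same idea with
 * POSIX ucontext, where makecontext() plays the role of writing a
 * handler's address and a fresh stack into a saved context.
 */
#if 0
#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, thread_ctx;
static char thread_stack[16 * 1024];

static void thread_body(void)
{
	/* Runs on thread_stack, like new_thread_handler() on a task stack. */
	printf("running on the new stack\n");
	swapcontext(&thread_ctx, &main_ctx);	/* like switch_threads() */
}

int main(void)
{
	getcontext(&thread_ctx);
	thread_ctx.uc_stack.ss_sp = thread_stack;
	thread_ctx.uc_stack.ss_size = sizeof(thread_stack);
	thread_ctx.uc_link = &main_ctx;
	makecontext(&thread_ctx, thread_body, 0);	/* "stuff" IP and SP */

	swapcontext(&main_ctx, &thread_ctx);	/* first switch into the thread */
	printf("back on the original stack\n");
	return 0;
}
#endif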

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We may want to rely on that to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		/*
		 * Kernel thread: per the kernel_thread() convention, "sp"
		 * carries the thread function and "arg" its argument.
		 */
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void arch_cpu_idle(void)
{
	unsigned long long nsecs;

	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	nsecs = disable_timer();
	idle_sleep(nsecs);
	local_irq_enable();
}

int __cant_sleep(void)
{
	/* Is in_interrupt() really needed? */
	return in_atomic() || irqs_disabled() || in_interrupt();
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}
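
/*
 * A hedged worked example of the masking above, not part of the original
 * file: assuming 4 KiB pages and CONFIG_KERNEL_STACK_ORDER == 2 (16 KiB
 * kernel stacks), PAGE_MASK << 2 clears the low 14 bits, rounding sp down
 * to the base of its 16 KiB stack, which is where thread_info lives.
 * Buildable as a standalone userspace check.
 */
#if 0
#include <assert.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define STACK_ORDER	2	/* stand-in for CONFIG_KERNEL_STACK_ORDER */

int main(void)
{
	unsigned long base = 0xc0124000UL;	/* a 16 KiB-aligned stack base */
	unsigned long sp   = base + 0x2f48UL;	/* an sp somewhere inside it */

	assert((sp & (PAGE_MASK << STACK_ORDER)) == base);
	return 0;
}
#endif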

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}
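
/*
 * A hedged aside, not part of the original file: __uml_exitcall_begin and
 * __uml_exitcall_end delimit a linker section that UML's __uml_exitcall()
 * registration macro populates.  The standalone sketch below shows the
 * same pattern with illustrative names (not UML's), using the
 * __start_/__stop_ symbols that GCC and Clang generate for sections whose
 * names are valid C identifiers.
 */
#if 0
#include <stdio.h>

typedef void (*exitcall_t)(void);

#define my_exitcall(fn)						\
	static exitcall_t __call_##fn				\
	__attribute__((__used__, __section__("my_exitcalls"))) = fn

extern exitcall_t __start_my_exitcalls[], __stop_my_exitcalls[];

static void bye(void)
{
	printf("bye\n");
}
my_exitcall(bye);

int main(void)
{
	exitcall_t *call = __stop_my_exitcalls;

	/* Walk backwards, like do_uml_exitcalls() above. */
	while (--call >= __start_my_exitcalls)
		(*call)();
	return 0;
}
#endif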

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);
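
/*
 * Usage sketch, not part of the original file: from a shell inside the
 * UML guest (assuming the host's ptrace supports sysemu, so
 * sysemu_supported is non-zero):
 *
 *   cat /proc/sysemu        # prints the current mode, 0-2
 *   echo 1 > /proc/sysemu   # only the first character is used
 */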

/*
 * Returns 0 if "t" (defaulting to current) isn't singlestepping, 1 if it
 * is singlestepping a syscall, and 2 if it is singlestepping normally.
 */
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().  All other arches have
 * "#define arch_align_stack(x) (x)" in their asm/exec.h.  As that header
 * is pulled into UML via asm-um/system-generic.h, we can use it to behave
 * as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif
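
/*
 * A hedged check of the arithmetic above, not part of the original file:
 * whatever offset in [0, 8192) is subtracted, the result stays 16-byte
 * aligned and slides at most 8 KiB (plus the alignment round-down) below
 * the original sp.  Buildable as a standalone userspace program.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned long sp = 0x7fffffffe000UL;
	unsigned long off;

	for (off = 0; off < 8192; off++) {
		unsigned long r = (sp - off) & ~0xfUL;

		assert((r & 0xfUL) == 0);	/* 16-byte aligned */
		assert(sp - r < 8192 + 16);	/* bounded slide */
	}
	return 0;
}
#endif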

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}