/*
 * arch/um/kernel/skas/process.c (revision 64c70b1c)
 *
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/ptrace.h"
#include "linux/proc_fs.h"
#include "linux/file.h"
#include "linux/errno.h"
#include "linux/init.h"
#include "asm/uaccess.h"
#include "asm/atomic.h"
#include "kern_util.h"
#include "as-layout.h"
#include "skas.h"
#include "os.h"
#include "tlb.h"
#include "kern.h"
#include "mode.h"
#include "registers.h"

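/*
 * Switch host context from one UML task to the next.  The real jump is
 * done by switch_threads(), which longjmps from the outgoing task's
 * switch_buf into the incoming one's.  The pid == 0 checks appear to
 * special-case the idle thread, turning the tick off around it (see
 * the XXX below).
 */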
void switch_to_skas(void *prev, void *next)
{
	struct task_struct *from, *to;

	from = prev;
	to = next;

	/* XXX need to check runqueues[cpu].idle */
	if (current->pid == 0)
		switch_timers(0);

	switch_threads(&from->thread.mode.skas.switch_buf,
		       &to->thread.mode.skas.switch_buf);

	arch_switch_to_skas(current->thread.prev_sched, current);

	if (current->pid == 0)
		switch_timers(1);
}

extern void schedule_tail(struct task_struct *prev);

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
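/*
 * A rough sketch of that mechanism, assuming a setjmp/longjmp-style
 * jmp_buf (the actual buffer layout is arch-specific and is set up in
 * new_thread()):
 *
 *	jmp_buf buf;
 *	store new_thread_handler as buf's saved PC
 *	store the top of the new kernel stack as buf's saved SP
 *	longjmp(buf, 1);	- "returns" into new_thread_handler()
 *
 * so the function below starts executing with no real caller and no
 * arguments; everything it needs is parked in current->thread.
 */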
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits.
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	} else
		do_exit(0);
}

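/*
 * Nothing SKAS-specific to release per-thread; the interesting
 * host-side state hangs off the mm (see new_mm() below), not the
 * thread.
 */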
void release_thread_skas(struct task_struct *task)
{
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();
	if (current->thread.prev_sched == NULL)
		panic("fork_handler: prev_sched is NULL");

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to_skas isn't needed. We might want to exploit
	 * this to improve performance. -bb
	 */
	arch_switch_to_skas(current->thread.prev_sched, current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

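/*
 * Set up the new task's thread state.  Two cases: a fork()-style copy,
 * where the child gets a duplicate of the parent's registers with the
 * syscall return value forced to 0 (and optionally a new stack
 * pointer), and a kernel thread, which instead records the function to
 * run and starts life in new_thread_handler().
 */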
int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
		     unsigned long stack_top, struct task_struct *p,
		     struct pt_regs *regs)
{
	void (*handler)(void);

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
		       sizeof(p->thread.regs.regs.skas));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.skas.regs) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		init_thread_registers(&p->thread.regs.regs);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
		   handler);
	return 0;
}

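/*
 * Create a fresh host address space.  On hosts carrying the SKAS3
 * /proc/mm patch, each open of /proc/mm yields an fd naming a new,
 * empty address space; that fd is how the rest of UML refers to the
 * mm.  If the host setup still needs the syscall stub
 * (skas_needs_stub), its code and data pages are mapped into the new
 * space up front.
 */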
int new_mm(unsigned long stack)
{
	int fd;

	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
	if (fd < 0)
		return fd;

	if (skas_needs_stub)
		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);

	return fd;
}

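/*
 * Record the host pid backing this cpu's idle thread, then sit in the
 * idle loop.
 */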
void init_idle_skas(void)
{
	cpu_tasks[current_thread->cpu].pid = os_getpid();
	default_idle();
}

extern void start_kernel(void);

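/*
 * The first kernel thread: note which host process is cpu 0, mark it
 * online, and hand control to the generic start_kernel().  Signals are
 * blocked first, presumably so the early boot path can't be
 * interrupted by host signals.
 */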
static int start_kernel_proc(void *unused)
{
	int pid;

	block_signals();
	pid = os_getpid();

	cpu_tasks[0].pid = pid;
	cpu_tasks[0].task = current;
#ifdef CONFIG_SMP
	cpu_online_map = cpumask_of_cpu(0);
#endif
	start_kernel();
	return 0;
}

extern int userspace_pid[];

extern char cpu0_irqstack[];

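/*
 * Boot entry point for SKAS mode: protect and register cpu 0's IRQ
 * stack, start the host process that will run userspace code when
 * /proc/mm is available, then start the idle thread, which picks
 * start_kernel_proc() out of init_task's thread request and runs it
 * via new_thread_handler().
 */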
int start_uml_skas(void)
{
	stack_protections((unsigned long) &cpu0_irqstack);
	set_sigstack(cpu0_irqstack, THREAD_SIZE);
	if (proc_mm)
		userspace_pid[0] = start_userspace(0);

	init_new_thread_signals();

	init_task.thread.request.u.thread.proc = start_kernel_proc;
	init_task.thread.request.u.thread.arg = NULL;
	return start_idle_thread(task_stack_page(&init_task),
				 &init_task.thread.mode.skas.switch_buf);
}

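/*
 * Both helpers below report the host pid of the process that runs
 * userspace code.  With a single userspace process per cpu there is
 * only userspace_pid[0] to hand back for now; the FIXMEs note the
 * missing per-cpu lookup.
 */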
int external_pid_skas(struct task_struct *task)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int thread_pid_skas(struct task_struct *task)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

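/*
 * Shutdown path: kill every host process backing a userspace address
 * space.  With /proc/mm there is a single such process (per cpu,
 * eventually; see the FIXME); without it, each mm has its own host
 * process, so walk the task list and kill each one.
 */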
void kill_off_processes_skas(void)
{
	if (proc_mm)
		/*
		 * FIXME: need to loop over userspace_pids in
		 * kill_off_processes_skas
		 */
		os_kill_ptraced_process(userspace_pid[0], 1);
	else {
		struct task_struct *p;
		int pid, me;

		me = os_getpid();
		for_each_process(p) {
			if (p->mm == NULL)
				continue;

			pid = p->mm->context.skas.id.u.pid;
			os_kill_ptraced_process(pid, 1);
		}
	}
}

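/*
 * Return the address of the stub stack page for the current mm, or 0
 * for kernel threads, which have no mm and hence no stub.
 */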
unsigned long current_stub_stack(void)
{
	if (current->mm == NULL)
		return 0;

	return current->mm->context.skas.id.stack;
}