/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <timer-internal.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The callback returns only if the kernel thread execs a process.
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs);
}
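/*
 * Illustrative sketch (not part of this file's build): the "magic"
 * entry above relies only on standard setjmp()/longjmp() semantics.
 * A minimal userspace analogue, with all names below hypothetical
 * rather than UML API, looks like this:
 *
 *	#include <setjmp.h>
 *	#include <stdio.h>
 *
 *	static jmp_buf buf;
 *
 *	static void handler(void)
 *	{
 *		printf("entered via longjmp, like new_thread_handler\n");
 *	}
 *
 *	int main(void)
 *	{
 *		if (setjmp(buf) != 0) {
 *			handler();	// control arrived here "magically"
 *			return 0;
 *		}
 *		longjmp(buf, 1);	// resume at the setjmp with value 1
 *	}
 *
 * UML goes one step further: new_thread() stuffs the handler's address
 * directly into the jmp_buf, so the first switch_threads() into a new
 * thread lands in new_thread_handler() without a prior setjmp() there.
 */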
/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed.  It might be worth applying this
	 * to improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	os_idle_sleep(UM_NSEC_PER_SEC);
	local_irq_enable();
}

int __cant_sleep(void)
{
	/* Is in_interrupt() really needed? */
	return in_atomic() || irqs_disabled() || in_interrupt();
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}
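/*
 * Worked example for the masking above, assuming PAGE_SIZE = 4096
 * (PAGE_MASK = ~0xfffUL) and CONFIG_KERNEL_STACK_ORDER = 2, i.e. a
 * 16KB kernel stack (values hypothetical):
 *
 *	PAGE_MASK << 2 == ~0x3fffUL
 *	sp = 0x60a13e48  ->  stack = 0x60a10000
 *
 * If 0x60a10000 is the base of the current kernel stack (where
 * current_thread_info() lives), sp points into kernel context and
 * user_context() returns 0; any other base means a userspace sp.
 */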
extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);
	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h.
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif
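/*
 * Worked example for the randomization above (values hypothetical):
 * with sp = 0x7ffd1000 and get_random_int() % 8192 = 0x1234,
 *
 *	sp -= 0x1234;	// sp = 0x7ffcfdcc
 *	sp &= ~0xf;	// sp = 0x7ffcfdc0
 *
 * so the starting stack is shifted down by up to 8191 bytes and then
 * realigned to 16 bytes, matching what the x86 subarch does natively.
 */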
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}
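/*
 * Worked example for the get_wchan() scan above (addresses and symbols
 * hypothetical): suppose a sleeping task's stack words, scanned upward
 * from the saved SP, contain
 *
 *	0x0804a2c1	inside schedule()	-> seen_sched = true
 *	0x60123456	not a text address	-> skipped
 *	0x08051d30	inside mutex_lock()	-> returned
 *
 * so /proc/<pid>/wchan resolves to mutex_lock, the function the task
 * is actually blocked in, rather than to scheduler internals.
 */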