/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <timer-internal.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}

void exit_thread(void)
{
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We may want to exploit this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}
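/*
 * Illustrative sketch, not built into this file: the "called magically"
 * handoff used by new_thread_handler() and fork_handler() above, reduced
 * to plain setjmp()/longjmp(). UML actually stuffs the handler's address
 * straight into the jmp_buf through subarch-specific offsets (see
 * new_thread() and switch_threads()); this portable userspace analogue
 * selects the entry path through setjmp()'s return value instead. All
 * names below are illustrative and not part of this file.
 */
#if 0
#include <setjmp.h>
#include <stdio.h>

static jmp_buf switch_buf;

static void demo_thread_handler(void)
{
	/* Control arrives here only via longjmp(), never by a call. */
	printf("running in the new context\n");
}

int main(void)
{
	if (setjmp(switch_buf) != 0) {
		demo_thread_handler();
		return 0;
	}
	/* The switch_threads() analogue: jump into the saved context. */
	longjmp(switch_buf, 1);
}
#endif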
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}
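/*
 * Note on the kthread branch in copy_thread() above: for PF_KTHREAD
 * children the core kernel reuses the "sp" and "arg" parameters to carry
 * the thread function and its argument instead of a userspace stack
 * pointer, which is why sp is cast back to a function pointer. A sketch
 * of that path (my_worker is illustrative; kernel_thread() is the core
 * helper of this kernel vintage):
 */
#if 0
static int my_worker(void *arg)
{
	/* Ends up running via new_thread_handler() -> fn(arg). */
	return 0;
}

static void demo_spawn(void)
{
	/*
	 * kernel_thread(fn, arg, flags) eventually reaches
	 * copy_thread(flags, (unsigned long)fn, (unsigned long)arg, p)
	 * for the PF_KTHREAD child.
	 */
	kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
}
#endif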
void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	os_idle_sleep(UM_NSEC_PER_SEC);
	local_irq_enable();
}

int __cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);
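/*
 * Illustrative use of the /proc/sysemu file created above, from inside a
 * running UML instance (assuming the host's ptrace reported SYSEMU
 * support, i.e. sysemu_supported is nonzero). Only the first character
 * written is parsed, and values above sysemu_supported are silently
 * ignored by set_using_sysemu():
 *
 *   # cat /proc/sysemu
 *   0
 *   # echo 1 > /proc/sysemu    (use PTRACE_SYSEMU for syscall emulation)
 *   # echo 2 > /proc/sysemu    (single-stepping variant, where supported)
 */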
356 */ 357 #ifndef arch_align_stack 358 unsigned long arch_align_stack(unsigned long sp) 359 { 360 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 361 sp -= get_random_int() % 8192; 362 return sp & ~0xf; 363 } 364 #endif 365 366 unsigned long get_wchan(struct task_struct *p) 367 { 368 unsigned long stack_page, sp, ip; 369 bool seen_sched = 0; 370 371 if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING)) 372 return 0; 373 374 stack_page = (unsigned long) task_stack_page(p); 375 /* Bail if the process has no kernel stack for some reason */ 376 if (stack_page == 0) 377 return 0; 378 379 sp = p->thread.switch_buf->JB_SP; 380 /* 381 * Bail if the stack pointer is below the bottom of the kernel 382 * stack for some reason 383 */ 384 if (sp < stack_page) 385 return 0; 386 387 while (sp < stack_page + THREAD_SIZE) { 388 ip = *((unsigned long *) sp); 389 if (in_sched_functions(ip)) 390 /* Ignore everything until we're above the scheduler */ 391 seen_sched = 1; 392 else if (kernel_text_address(ip) && seen_sched) 393 return ip; 394 395 sp += sizeof(unsigned long); 396 } 397 398 return 0; 399 } 400 401 int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu) 402 { 403 int cpu = current_thread_info()->cpu; 404 405 return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu); 406 } 407 408