/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <timer-internal.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}
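
/*
 * Illustrative sketch, not part of the original file: switch_threads()
 * is built on setjmp/longjmp-style buffers, so the context switch in
 * __switch_to() boils down to something like the following, assuming
 * the UML_SETJMP and UML_LONGJMP helpers UML uses elsewhere:
 *
 *	if (UML_SETJMP(&from->thread.switch_buf) == 0)
 *		UML_LONGJMP(&to->thread.switch_buf, 1);
 *	... execution resumes here when something switches back to "from"
 *
 * The two handlers below are entered the same way: new_thread() plants
 * their address in a freshly initialized buffer, so the first longjmp
 * to that buffer "returns" into the handler.
 */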

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}
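
/*
 * Illustrative note, not part of the original file: for a kernel
 * thread, the (fn, arg) pair handed to kernel_thread() arrives in
 * copy_thread() encoded in "sp" and "arg", so the flow is roughly:
 *
 *	kernel_thread(fn, arg, flags)
 *	  -> copy_thread(flags, sp = fn, arg = arg, p)   stores fn/arg
 *	  -> new_thread() plants new_thread_handler() in p's switch_buf
 *	  -> the first switch to p longjmps into new_thread_handler()
 *	  -> new_thread_handler() looks the pair up again and calls fn(arg)
 */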

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	os_idle_sleep(UM_NSEC_PER_SEC);
	local_irq_enable();
}

int __cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner = THIS_MODULE,
	.open = sysemu_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);
	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}
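
/*
 * Illustrative usage, not part of the original file: the /proc/sysemu
 * file created above takes a single digit, and sysemu_proc_write()
 * only inspects the first character while claiming the whole write,
 * so a plain "echo" works from inside the UML guest:
 *
 *	# cat /proc/sysemu
 *	0
 *	# echo 2 > /proc/sysemu
 *
 * Values above the detected sysemu_supported level are silently
 * ignored by set_using_sysemu().
 */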

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}
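
/*
 * Worked example, not part of the original file, for arch_align_stack()
 * above: with sp = 0x7ffff000 and a random offset of 5000 (0x1388),
 * sp becomes 0x7fffdc78, and masking with ~0xf yields 0x7fffdc70.
 * The stack start thus moves down by up to 8k and stays 16-byte
 * aligned.
 */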