1995473aeSJeff Dike /* 2995473aeSJeff Dike * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) 3995473aeSJeff Dike * Copyright 2003 PathScale, Inc. 4995473aeSJeff Dike * Licensed under the GPL 5995473aeSJeff Dike */ 6995473aeSJeff Dike 7995473aeSJeff Dike #include "linux/kernel.h" 8995473aeSJeff Dike #include "linux/sched.h" 9995473aeSJeff Dike #include "linux/interrupt.h" 10995473aeSJeff Dike #include "linux/string.h" 11995473aeSJeff Dike #include "linux/mm.h" 12995473aeSJeff Dike #include "linux/slab.h" 13995473aeSJeff Dike #include "linux/utsname.h" 14995473aeSJeff Dike #include "linux/fs.h" 15995473aeSJeff Dike #include "linux/utime.h" 16995473aeSJeff Dike #include "linux/smp_lock.h" 17995473aeSJeff Dike #include "linux/module.h" 18995473aeSJeff Dike #include "linux/init.h" 19995473aeSJeff Dike #include "linux/capability.h" 20995473aeSJeff Dike #include "linux/vmalloc.h" 21995473aeSJeff Dike #include "linux/spinlock.h" 22995473aeSJeff Dike #include "linux/proc_fs.h" 23995473aeSJeff Dike #include "linux/ptrace.h" 24995473aeSJeff Dike #include "linux/random.h" 25995473aeSJeff Dike #include "linux/personality.h" 26995473aeSJeff Dike #include "asm/unistd.h" 27995473aeSJeff Dike #include "asm/mman.h" 28995473aeSJeff Dike #include "asm/segment.h" 29995473aeSJeff Dike #include "asm/stat.h" 30995473aeSJeff Dike #include "asm/pgtable.h" 31995473aeSJeff Dike #include "asm/processor.h" 32995473aeSJeff Dike #include "asm/tlbflush.h" 33995473aeSJeff Dike #include "asm/uaccess.h" 34995473aeSJeff Dike #include "asm/user.h" 35995473aeSJeff Dike #include "kern_util.h" 364ff83ce1SJeff Dike #include "as-layout.h" 37995473aeSJeff Dike #include "kern.h" 38995473aeSJeff Dike #include "signal_kern.h" 39995473aeSJeff Dike #include "init.h" 40995473aeSJeff Dike #include "irq_user.h" 41995473aeSJeff Dike #include "mem_user.h" 42995473aeSJeff Dike #include "tlb.h" 43995473aeSJeff Dike #include "frame_kern.h" 44995473aeSJeff Dike #include "sigcontext.h" 45995473aeSJeff Dike 
#include "os.h"
#include "skas.h"

/* This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

/*
 * Host pid backing the given task's userspace.  Currently ignores the
 * task and always reports CPU 0's userspace pid.
 */
static inline int external_pid(struct task_struct *task)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return(userspace_pid[0]);
}

/*
 * Map a host pid back to the index of the CPU whose cpu_tasks slot
 * records it; -1 if no CPU currently has that pid.
 */
int pid_to_processor_id(int pid)
{
	int i;

	for(i = 0; i < ncpus; i++){
		if(cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

/* Release a kernel stack previously obtained from alloc_stack(). */
void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

/*
 * Allocate a kernel stack of 2^order pages.  Uses GFP_ATOMIC when the
 * caller cannot sleep.  Returns 0 on failure.
 */
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);
	if (page == 0)
		return 0;

	return page;
}

/*
 * Create a kernel thread running fn(arg).  The function and argument are
 * stashed in current's thread request so new_thread_handler() can pick
 * them up in the child; returns the pid from do_fork().
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}

/* Record the incoming task (and its host pid) in this CPU's cpu_tasks slot. */
static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
			{ external_pid(task), task });
}

extern void arch_switch_to(struct task_struct *from, struct task_struct *to);

/*
 * UML context switch.  Returns the previously-running task (prev_sched)
 * so the generic switch_to() macro can report it.  The do/while loop
 * re-switches as long as a saved_task is pending after each
 * switch_threads() round-trip.
 */
void *_switch_to(void *prev, void *next, void *last)
{
	struct task_struct *from = prev;
	struct task_struct *to= next;

	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		/* XXX need to check runqueues[cpu].idle */
		/* pid 0 is the idle thread; presumably timers are switched to
		 * an idle-friendly mode around it -- TODO confirm in
		 * switch_timers() */
		if(current->pid == 0)
			switch_timers(0);

		/* jmp_buf-based switch; execution resumes here when this
		 * task is switched back in */
		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current->thread.prev_sched, current);

		if(current->pid == 0)
			switch_timers(1);

		if(current->thread.saved_task)
			show_regs(&(current->thread.regs));
		next= current->thread.saved_task;
		prev= current;
	} while(current->thread.saved_task);

	return current->thread.prev_sched;

}
/*
 * Handle any reschedule or signal that became pending while we were in
 * the kernel; called before returning to userspace.
 */
void interrupt_end(void)
{
	if(need_resched())
		schedule();
	if(test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}

/* Nothing to tear down when a thread exits on UML. */
void exit_thread(void)
{
}

/* Out-of-line accessor for the current task pointer. */
void *get_current(void)
{
	return current;
}

extern void schedule_tail(struct task_struct *prev);

/* This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	/* Finish the scheduler's bookkeeping for the task we switched from */
	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/* fn/arg were stashed by kernel_thread() before the fork */
	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/* The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if(n == 1){
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		/* does not return -- enters the userspace run loop */
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}

/* Called magically, see new_thread_handler above */
/*
 * First code run by a forked (as opposed to kernel_thread-created)
 * child; reached via longjmp from switch_threads().  Ends by entering
 * the userspace run loop and never returns.
 */
void fork_handler(void)
{
	force_flush_all();
	if(current->thread.prev_sched == NULL)
		panic("blech");

	schedule_tail(current->thread.prev_sched);

	/* XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb */
	arch_switch_to(current->thread.prev_sched, current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

/*
 * Set up the new task's thread state for either a userspace fork
 * (current->thread.forking: copy parent registers, zero the child's
 * syscall return, optionally install a new sp and TLS) or a kernel
 * thread (fresh registers plus the fn/arg request copied from current).
 * Returns 0 or the error from arch_copy_tls().
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct * p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if(current->thread.forking){
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		/* child sees 0 from the fork-family syscall */
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.regs, 0);
		if(sp != 0)
			REGS_SP(p->thread.regs.regs.regs) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	}
	else {
		init_thread_registers(&p->thread.regs.regs);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	/* arrange for handler to run when the child is first switched in */
	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

/*
 * Run a callback on the initial (boot) thread, with kmalloc disabled
 * around it -- presumably because the callback may run before/outside
 * normal kernel context; TODO confirm against initial_thread_cb_skas().
 */
void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void default_idle(void)
{
	while(1){
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if(need_resched())
			schedule();

		idle_sleep(10);
	}
}

/* Per-CPU idle entry: record this CPU's host pid, then idle forever. */
void cpu_idle(void)
{
	cpu_tasks[current_thread->cpu].pid = os_getpid();
	default_idle();
}

/*
 * Walk the task's page tables and translate a virtual address to a
 * physical one (plus the page offset).  Optionally returns the pte via
 * pte_out.  Returns ERR_PTR(-EINVAL) if any level is not present.
 */
void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
		      pte_t *pte_out)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t ptent;

	if(task->mm == NULL)
		return ERR_PTR(-EINVAL);
	pgd = pgd_offset(task->mm, addr);
	if(!pgd_present(*pgd))
		return ERR_PTR(-EINVAL);

	pud = pud_offset(pgd, addr);
	if(!pud_present(*pud))
		return ERR_PTR(-EINVAL);

	pmd = pmd_offset(pud, addr);
	if(!pmd_present(*pmd))
		return ERR_PTR(-EINVAL);

	pte = pte_offset_kernel(pmd, addr);
	ptent = *pte;
	if(!pte_present(ptent))
		return ERR_PTR(-EINVAL);

	if(pte_out != NULL)
		*pte_out = ptent;
	return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
}

/*
 * Best-effort name of the current command, read from the process's
 * argument area.  Not available under SMP/HIGHMEM, where the direct
 * __va() mapping used here can't be relied on.
 */
char *current_cmd(void)
{
#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
	return "(Unknown)";
#else
	void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
	return IS_ERR(addr) ? "(Unknown)": __va((unsigned long) addr);
#endif
}

/* Required arch hook; UML has nothing to dump into struct user. */
void dump_thread(struct pt_regs *regs, struct user *u)
{
}

/* Nonzero if the caller must not sleep in the current context. */
int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

/*
 * Nonzero if sp is NOT on the current kernel stack, i.e. it presumably
 * belongs to userspace -- TODO confirm callers' interpretation.
 */
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread;
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

/* Run the UML exitcall section in reverse registration order. */
void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(char *string)
{
	return kstrdup(string, GFP_KERNEL);
}

/* Thin wrappers so userspace-side code can use the uaccess helpers. */
int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

/*
 * SIGIO-time IPI dispatch for SMP.  Returns 1 when handled on a
 * non-boot CPU, 0 otherwise (and always 0 on UP builds).
 */
int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread->cpu;
	IPI_handler(cpu);
	if(cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread->cpu;
}

/* Current sysemu level (0-2); writable only up to what the host supports. */
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	/* silently refuse levels the host doesn't support */
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

/* /proc/sysemu read handler: report the current sysemu level. */
static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data)
{
	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /*No overflow*/
		*eof = 1;

	return strlen(buf);
}

/* /proc/sysemu write handler: accept a single digit '0'-'2'. */
static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	return count; /*We use the first char, but pretend to write everything*/
}

/*
 * Create /proc/sysemu when the host supports sysemu.  Registration
 * failure is logged but non-fatal (always returns 0).
 */
int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;
	if (!sysemu_supported)
		return 0;

	ent = create_proc_entry("sysemu", 0600, &proc_root);

	if (ent == NULL)
	{
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	ent->read_proc  = proc_read_sysemu;
	ent->write_proc = proc_write_sysemu;

	return 0;
}

late_initcall(make_proc_sysemu);

/*
 * Single-stepping state of the given task (current if t is NULL):
 * 0 = not single-stepping, 1 = stepping a syscall, 2 = stepping
 * ordinary instructions.
 */
int singlestepping(void * t)
{
	struct task_struct *task = t ? t : current;

	if ( ! (task->ptrace & PT_DTRACE) )
		return(0);

	if (task->thread.singlestep_syscall)
		return(1);

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	/* randomize the stack top within 8k, then 16-byte align */
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif