--- process.c (77bf4400319db9d2a8af6b00c2be6faa0f3d07cb)
+++ process.c (ba180fd437156f7fd8cfb2fdd021d949eeef08d6)
 /*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Copyright 2003 PathScale, Inc.
  * Licensed under the GPL
  */
 
-#include "linux/kernel.h"
-#include "linux/sched.h"
-#include "linux/interrupt.h"
-#include "linux/string.h"
+#include "linux/stddef.h"
+#include "linux/err.h"
+#include "linux/hardirq.h"
 #include "linux/mm.h"
-#include "linux/slab.h"
-#include "linux/utsname.h"
-#include "linux/fs.h"
-#include "linux/utime.h"
-#include "linux/smp_lock.h"
-#include "linux/module.h"
-#include "linux/init.h"
-#include "linux/capability.h"
-#include "linux/vmalloc.h"
-#include "linux/spinlock.h"
+#include "linux/personality.h"
 #include "linux/proc_fs.h"
 #include "linux/ptrace.h"
 #include "linux/random.h"
-#include "linux/personality.h"
-#include "asm/unistd.h"
-#include "asm/mman.h"
-#include "asm/segment.h"
-#include "asm/stat.h"
+#include "linux/sched.h"
+#include "linux/threads.h"
 #include "asm/pgtable.h"
-#include "asm/processor.h"
-#include "asm/tlbflush.h"
 #include "asm/uaccess.h"
-#include "asm/user.h"
-#include "kern_util.h"
 #include "as-layout.h"
-#include "kern.h"
-#include "signal_kern.h"
-#include "init.h"
-#include "irq_user.h"
-#include "mem_user.h"
-#include "tlb.h"
-#include "frame_kern.h"
-#include "sigcontext.h"
+#include "kern_util.h"
 #include "os.h"
 #include "skas.h"
+#include "tlb.h"
 
-/* This is a per-cpu array. A processor only modifies its entry and it only
+/*
+ * This is a per-cpu array. A processor only modifies its entry and it only
  * cares about its entry, so it's OK if another processor is modifying its
  * entry.
  */
 struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
 
 static inline int external_pid(struct task_struct *task)
 {
 	/* FIXME: Need to look up userspace_pid by cpu */
-	return(userspace_pid[0]);
+	return userspace_pid[0];
 }
 
 int pid_to_processor_id(int pid)
 {
 	int i;
 
-	for(i = 0; i < ncpus; i++){
-		if(cpu_tasks[i].pid == pid)
+	for(i = 0; i < ncpus; i++) {
+		if (cpu_tasks[i].pid == pid)
 			return i;
 	}
 	return -1;
 }
 
 void free_stack(unsigned long stack, int order)
 {
 	free_pages(stack, order);
--- 39 unchanged lines hidden ---
 
 	to->thread.prev_sched = from;
 	set_current(to);
 
 	do {
 		current->thread.saved_task = NULL;
 
 		/* XXX need to check runqueues[cpu].idle */
-		if(current->pid == 0)
+		if (current->pid == 0)
 			switch_timers(0);
 
 		switch_threads(&from->thread.switch_buf,
 			       &to->thread.switch_buf);
 
 		arch_switch_to(current->thread.prev_sched, current);
 
-		if(current->pid == 0)
+		if (current->pid == 0)
 			switch_timers(1);
 
-		if(current->thread.saved_task)
+		if (current->thread.saved_task)
 			show_regs(&(current->thread.regs));
 		next= current->thread.saved_task;
 		prev= current;
 	} while(current->thread.saved_task);
 
 	return current->thread.prev_sched;
 
 }
 
 void interrupt_end(void)
 {
-	if(need_resched())
+	if (need_resched())
 		schedule();
-	if(test_tsk_thread_flag(current, TIF_SIGPENDING))
+	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
 		do_signal();
 }
 
 void exit_thread(void)
 {
 }
 
 void *get_current(void)
 {
 	return current;
 }
 
 extern void schedule_tail(struct task_struct *prev);
 
-/* This is called magically, by its address being stuffed in a jmp_buf
+/*
+ * This is called magically, by its address being stuffed in a jmp_buf
  * and being longjmp-d to.
  */
 void new_thread_handler(void)
 {
 	int (*fn)(void *), n;
 	void *arg;
 
-	if(current->thread.prev_sched != NULL)
+	if (current->thread.prev_sched != NULL)
 		schedule_tail(current->thread.prev_sched);
 	current->thread.prev_sched = NULL;
 
 	fn = current->thread.request.u.thread.proc;
 	arg = current->thread.request.u.thread.arg;
 
-	/* The return value is 1 if the kernel thread execs a process,
+	/*
+	 * The return value is 1 if the kernel thread execs a process,
 	 * 0 if it just exits
 	 */
 	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
-	if(n == 1){
+	if (n == 1) {
 		/* Handle any immediate reschedules or signals */
 		interrupt_end();
 		userspace(&current->thread.regs.regs);
 	}
 	else do_exit(0);
 }
 
 /* Called magically, see new_thread_handler above */
 void fork_handler(void)
 {
 	force_flush_all();
-	if(current->thread.prev_sched == NULL)
+	if (current->thread.prev_sched == NULL)
 		panic("blech");
 
 	schedule_tail(current->thread.prev_sched);
 
-	/* XXX: if interrupt_end() calls schedule, this call to
+	/*
+	 * XXX: if interrupt_end() calls schedule, this call to
 	 * arch_switch_to isn't needed. We could want to apply this to
-	 * improve performance. -bb */
+	 * improve performance. -bb
+	 */
 	arch_switch_to(current->thread.prev_sched, current);
 
 	current->thread.prev_sched = NULL;
 
 	/* Handle any immediate reschedules or signals */
 	interrupt_end();
 
 	userspace(&current->thread.regs.regs);
 }
 
 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		unsigned long stack_top, struct task_struct * p,
 		struct pt_regs *regs)
 {
 	void (*handler)(void);
 	int ret = 0;
 
 	p->thread = (struct thread_struct) INIT_THREAD;
 
-	if(current->thread.forking){
+	if (current->thread.forking) {
 		memcpy(&p->thread.regs.regs, &regs->regs,
 		       sizeof(p->thread.regs.regs));
 		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.regs, 0);
-		if(sp != 0)
+		if (sp != 0)
 			REGS_SP(p->thread.regs.regs.regs) = sp;
 
 		handler = fork_handler;
 
 		arch_copy_thread(&current->thread.arch, &p->thread.arch);
 	}
 	else {
 		init_thread_registers(&p->thread.regs.regs);
--- 22 unchanged lines hidden ---
 
 	kmalloc_ok = 0;
 	initial_thread_cb_skas(proc, arg);
 	kmalloc_ok = save_kmalloc_ok;
 }
 
 void default_idle(void)
 {
-	while(1){
+	while(1) {
 		/* endless idle loop with no priority at all */
 
 		/*
 		 * although we are an idle CPU, we do not want to
 		 * get into the scheduler unnecessarily.
 		 */
-		if(need_resched())
+		if (need_resched())
 			schedule();
 
 		idle_sleep(10);
 	}
 }
 
 void cpu_idle(void)
 {
--- 5 unchanged lines hidden ---
 		      pte_t *pte_out)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t ptent;
 
-	if(task->mm == NULL)
+	if (task->mm == NULL)
 		return ERR_PTR(-EINVAL);
 	pgd = pgd_offset(task->mm, addr);
-	if(!pgd_present(*pgd))
+	if (!pgd_present(*pgd))
 		return ERR_PTR(-EINVAL);
 
 	pud = pud_offset(pgd, addr);
-	if(!pud_present(*pud))
+	if (!pud_present(*pud))
 		return ERR_PTR(-EINVAL);
 
 	pmd = pmd_offset(pud, addr);
-	if(!pmd_present(*pmd))
+	if (!pmd_present(*pmd))
 		return ERR_PTR(-EINVAL);
 
 	pte = pte_offset_kernel(pmd, addr);
 	ptent = *pte;
-	if(!pte_present(ptent))
+	if (!pte_present(ptent))
 		return ERR_PTR(-EINVAL);
 
-	if(pte_out != NULL)
+	if (pte_out != NULL)
 		*pte_out = ptent;
 	return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
 }
 
 char *current_cmd(void)
 {
 #if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
 	return "(Unknown)";
--- 56 unchanged lines hidden ---
 	return strlen_user(str);
 }
 
 int smp_sigio_handler(void)
 {
 #ifdef CONFIG_SMP
 	int cpu = current_thread->cpu;
 	IPI_handler(cpu);
-	if(cpu != 0)
+	if (cpu != 0)
 		return 1;
 #endif
 	return 0;
 }
 
 int cpu(void)
 {
 	return current_thread->cpu;
--- 11 unchanged lines hidden ---
 
 int get_using_sysemu(void)
 {
 	return atomic_read(&using_sysemu);
 }
 
 static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data)
 {
-	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /*No overflow*/
+	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size)
+		/* No overflow */
 		*eof = 1;
 
 	return strlen(buf);
 }
 
 static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
 {
 	char tmp[2];
 
 	if (copy_from_user(tmp, buf, 1))
 		return -EFAULT;
 
 	if (tmp[0] >= '0' && tmp[0] <= '2')
 		set_using_sysemu(tmp[0] - '0');
-	return count; /*We use the first char, but pretend to write everything*/
+	/* We use the first char, but pretend to write everything */
+	return count;
 }
 
 int __init make_proc_sysemu(void)
 {
 	struct proc_dir_entry *ent;
 	if (!sysemu_supported)
 		return 0;
 
--- 13 unchanged lines hidden ---
 
 late_initcall(make_proc_sysemu);
 
 int singlestepping(void * t)
 {
 	struct task_struct *task = t ? t : current;
 
 	if ( ! (task->ptrace & PT_DTRACE) )
-		return(0);
+		return 0;
 
 	if (task->thread.singlestep_syscall)
-		return(1);
+		return 1;
 
 	return 2;
 }
 
 /*
  * Only x86 and x86_64 have an arch_align_stack().
  * All other arches have "#define arch_align_stack(x) (x)"
  * in their asm/system.h
  * As this is included in UML from asm-um/system-generic.h,
  * we can use it to behave as the subarch does.
  */
 #ifndef arch_align_stack
 unsigned long arch_align_stack(unsigned long sp)
 {
 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
 		sp -= get_random_int() % 8192;
 	return sp & ~0xf;
 }
 #endif