/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.sp)[3];
}

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		local_irq_disable();
		if (!need_resched()) {
			ktime_t t0, t1;
			u64 t0n, t1n;

			t0 = ktime_get();
			t0n = ktime_to_ns(t0);
			safe_halt();	/* enables interrupts racelessly */
			local_irq_disable();
			t1 = ktime_get();
			t1n = ktime_to_ns(t1);
			sched_clock_idle_wakeup_event(t1n - t0n);
		}
		local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
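/*
 * Note: pm_idle above is the hook through which the low-level idle routine
 * is selected.  When it is NULL, cpu_idle() falls back to default_idle();
 * it can be pointed at mwait_idle() by select_idle_routine() below, or
 * overridden from the command line via the "idle=" early parameter handled
 * by idle_setup(), e.g.:
 *
 *	idle=poll	- busy-poll need_resched (poll_idle)
 *	idle=mwait	- set force_mwait, bypassing the C1 sub-state check
 *			  in mwait_usable()
 */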
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	cpu_relax();
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			check_pgt_cache();
			rmb();
			idle = pm_idle;

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

static void do_nothing(void *unused)
{
}

void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map, tmp = current->cpus_allowed;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
		/*
		 * We waited 1 sec, if a CPU still did not call idle
		 * it may be because it is in idle and not waking up
		 * because it has nothing to do.
		 * Give all the remaining CPUS a kick.
		 */
		smp_call_function_mask(map, do_nothing, 0, 0);
	} while (!cpus_empty(map));

	set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
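/*
 * cpu_idle_wait() above exists for callers that change pm_idle at run time
 * (the ACPI processor idle code, for example): it returns only once every
 * online CPU has been seen going through the idle loop - and therefore
 * re-reading pm_idle - so the old idle routine can safely be torn down.
 */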
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	if (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	local_irq_enable();
	mwait_idle_with_hints(0, 0);
}

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
	if (force_mwait)
		return 1;
	/* Any C1 states supported? */
	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	static int selected;

	if (selected)
		return;
#ifdef CONFIG_X86_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
	}
#endif
	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => All CPUs supports mwait
		 */
		if (!pm_idle) {
			printk(KERN_INFO "using mwait in idle threads.\n");
			pm_idle = mwait_idle;
		}
	}
	selected = 1;
}

static int __init idle_setup(char *str)
{
	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);

void __show_registers(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		savesegment(gs, gs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");
	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
		task_pid_nr(current), current->comm,
		print_tainted(), init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
		0xffff & regs->cs, regs->ip, regs->flags,
		smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
		regs->ds & 0xffff, regs->es & 0xffff,
		regs->fs & 0xffff, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
		cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
		d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
		d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_registers(regs, 1);
	show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.bx = (unsigned long) fn;
	regs.dx = (unsigned long) arg;

	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
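/*
 * A typical kernel_thread() call looks like the sketch below (the names
 * are only illustrative, nothing here is defined in this file):
 *
 *	static int my_worker(void *data)
 *	{
 *		do_the_work(data);
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * The child starts in kernel_thread_helper (entry_32.S), which calls
 * fn(arg) using the register values set up above and hands the return
 * value to do_exit().
 */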
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
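/*
 * copy_thread() sets up the child so that its first context switch lands
 * in ret_from_fork: the child gets a copy of the parent's user-mode
 * register frame with %eax forced to 0 (the child's return value from
 * fork/clone) and, when the caller supplied one, a new user stack pointer.
 */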
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	u16 gs;

	/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	dump->u_debugreg[0] = current->thread.debugreg0;
	dump->u_debugreg[1] = current->thread.debugreg1;
	dump->u_debugreg[2] = current->thread.debugreg2;
	dump->u_debugreg[3] = current->thread.debugreg3;
	dump->u_debugreg[4] = 0;
	dump->u_debugreg[5] = 0;
	dump->u_debugreg[6] = current->thread.debugreg6;
	dump->u_debugreg[7] = current->thread.debugreg7;

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs.bx = regs->bx;
	dump->regs.cx = regs->cx;
	dump->regs.dx = regs->dx;
	dump->regs.si = regs->si;
	dump->regs.di = regs->di;
	dump->regs.bp = regs->bp;
	dump->regs.ax = regs->ax;
	dump->regs.ds = (u16)regs->ds;
	dump->regs.es = (u16)regs->es;
	dump->regs.fs = (u16)regs->fs;
	savesegment(gs, gs);
	dump->regs.orig_ax = regs->orig_ax;
	dump->regs.ip = regs->ip;
	dump->regs.cs = (u16)regs->cs;
	dump->regs.flags = regs->flags;
	dump->regs.sp = regs->sp;
	dump->regs.ss = (u16)regs->ss;

	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);
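/*
 * dump_thread() above fills in the a.out-style "struct user" record and is
 * only used by the a.out core dump code (fs/binfmt_aout.c); the ELF core
 * dump path collects register state by other means.
 */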
#ifdef CONFIG_SECCOMP
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}
static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}
#endif /* CONFIG_SECCOMP */

static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		 struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;
	if (next->ds_area_msr != prev->ds_area_msr) {
		/* we clear debugctl to make sure DS
		 * is not in use when we change it */
		debugctl = 0;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
	}

	if (next->debugctlmsr != debugctl)
		wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg0, 0);
		set_debugreg(next->debugreg1, 1);
		set_debugreg(next->debugreg2, 2);
		set_debugreg(next->debugreg3, 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg6, 6);
		set_debugreg(next->debugreg7, 7);
	}

#ifdef CONFIG_SECCOMP
	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}
#endif

	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);


	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * The previous owner of the bitmap (and hence the bitmap
		 * contents) matches the next task, so we don't have to do
		 * anything except set a valid offset in the TSS:
		 */
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}
	/*
	 * Lazy TSS I/O bitmap copy. We set an invalid offset here and let
	 * the task take a #GP if it performs an I/O instruction. The #GP
	 * handler verifies that the faulting task has a valid I/O bitmap
	 * and, if true, does the real copy and restarts the instruction.
	 * This saves us redundant copies when the currently switched task
	 * does not perform any I/O during its timeslice.
	 */
	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}
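/*
 * The lazy-copy path above is completed from the general protection fault
 * handler: when it sees io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY it
 * copies the task's bitmap into the TSS, records the new io_bitmap_owner
 * and lets the I/O instruction restart (see do_general_protection() in
 * traps_32.c in this kernel).
 */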
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);


	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	x86_write_percpu(current_task, next_p);

	return prev_p;
}
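/*
 * The system calls below take "struct pt_regs regs" by value: with the
 * i386 asmlinkage convention the register frame saved on kernel entry is
 * exactly what sits on the stack at this point, so the functions can read
 * the user's registers directly and pass &regs on to do_fork()/do_execve().
 */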
asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.bx;
	newsp = regs.cx;
	parent_tidptr = (int __user *)regs.dx;
	child_tidptr = (int __user *)regs.di;
	if (!newsp)
		newsp = regs.sp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char * filename;

	filename = getname((char __user *) regs.bx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.cx,
			(char __user * __user *) regs.dx,
			&regs);
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}

#define top_esp		(THREAD_SIZE - sizeof(unsigned long))
#define top_ebp		(THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
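/*
 * Note on get_wchan() above: it relies on the frame layout left behind by
 * switch_to(), which pushes %ebp last, so p->thread.sp points at the saved
 * frame pointer and the caller's return address lives at bp + 4.  The walk
 * returns the first return address that is not inside the scheduler (the
 * function the task is blocked in) and gives up after 16 frames.
 */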