#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/debugreg.h>

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        int ret;

        *dst = *src;
        if (fpu_allocated(&src->thread.fpu)) {
                memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
                ret = fpu_alloc(&dst->thread.fpu);
                if (ret)
                        return ret;
                fpu_copy(&dst->thread.fpu, &src->thread.fpu);
        }
        return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
        fpu_free(&tsk->thread.fpu);
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }
}

void show_regs(struct pt_regs *regs)
{
        show_registers(regs);
        show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 0);
}

void show_regs_common(void)
{
        const char *vendor, *product, *board;

        vendor = dmi_get_system_info(DMI_SYS_VENDOR);
        if (!vendor)
                vendor = "";
        product = dmi_get_system_info(DMI_PRODUCT_NAME);
        if (!product)
                product = "";

        /* Board Name is optional */
        board = dmi_get_system_info(DMI_BOARD_NAME);

        printk(KERN_CONT "\n");
        printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        printk(KERN_CONT " %s %s", vendor, product);
        if (board)
                printk(KERN_CONT "/%s", board);
        printk(KERN_CONT "\n");
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}
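/*
 * The TSC flag managed by the functions below is exposed to userspace
 * through prctl().  A minimal (hypothetical) userspace sketch of
 * disabling direct RDTSC access for the calling task:
 *
 *      #include <sys/prctl.h>
 *      #include <linux/prctl.h>
 *
 *      int tsc_mode;
 *      prctl(PR_GET_TSC, &tsc_mode);           // PR_TSC_ENABLE or PR_TSC_SIGSEGV
 *      prctl(PR_SET_TSC, PR_TSC_SIGSEGV);      // RDTSC now raises SIGSEGV
 *
 * Once PR_TSC_SIGSEGV is in effect, TIF_NOTSC is set for the task and
 * CR4.TSD is raised whenever it runs, so a userspace RDTSC traps.
 */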
static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
            test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
                unsigned long debugctl = get_debugctlmsr();

                debugctl &= ~DEBUGCTLMSR_BTF;
                if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
                        debugctl |= DEBUGCTLMSR_BTF;

                update_debugctlmsr(debugctl);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
        propagate_user_return_notify(prev_p, next_p);
}
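/*
 * The per-task IO bitmap synced into the TSS above is normally set up
 * via the ioperm() syscall.  A sketch (hypothetical port number; needs
 * CAP_SYS_RAWIO) of the userspace side:
 *
 *      #include <sys/io.h>
 *
 *      if (ioperm(0x378, 3, 1) == 0)   // allow ports 0x378-0x37a
 *              outb(0x00, 0x378);      // direct port access, no #GP
 *
 * ioperm() sets TIF_IO_BITMAP for the task, after which
 * __switch_to_xtra() keeps the TSS copy of the bitmap current on
 * every context switch.
 */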
int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}

long
sys_clone(unsigned long clone_flags, unsigned long newsp,
          void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->sp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.si = (unsigned long) fn;
        regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
        regs.gs = __KERNEL_STACK_CANARY;
#else
        regs.ss = __KERNEL_DS;
#endif

        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
        regs.flags = X86_EFLAGS_IF | 0x2;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
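/*
 * kernel_thread() is exported for modules.  A minimal (hypothetical)
 * in-kernel usage sketch; the thread function and its argument are
 * illustrative only:
 *
 *      static int my_thread_fn(void *data)
 *      {
 *              pr_info("worker started: %s\n", (char *)data);
 *              return 0;
 *      }
 *
 *      kernel_thread(my_thread_fn, "demo", CLONE_FS | CLONE_FILES);
 *
 * Most new code should prefer the kthread_create()/kthread_run() API,
 * which is layered on top of this primitive.
 */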
/*
 * sys_execve() executes a new program.
 */
long sys_execve(const char __user *name,
                const char __user *const __user *argv,
                const char __user *const __user *envp, struct pt_regs *regs)
{
        long error;
        char *filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
#endif

        putname(filename);
        return error;
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                trace_power_start(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle(1, smp_processor_id());
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end(smp_processor_id());
                trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        if (!need_resched()) {
                if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
}
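/*
 * MWAIT hint encoding, per the Intel SDM: eax[7:4] selects the target
 * C-state minus one (0x00 requests C1, 0x10 requests C2, ...), eax[3:0]
 * selects a sub-state, and ecx bit 0 asks for interrupts to act as
 * break events even while masked.  As an illustrative sketch only, a
 * caller requesting a C2-like state might do:
 *
 *      mwait_idle_with_hints(0x10, 1);
 *
 * The real hint values come from the platform's ACPI/cpuidle tables,
 * not from hardcoded constants.
 */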
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        if (!need_resched()) {
                trace_power_start(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle(1, smp_processor_id());
                if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end(smp_processor_id());
                trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
        } else
                local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        trace_power_start(POWER_CSTATE, 0, smp_processor_id());
        trace_cpu_idle(0, smp_processor_id());
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(smp_processor_id());
        trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0

int mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (boot_option_idle_override == IDLE_FORCE_MWAIT)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether ECX reports extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * edx enumerates MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT.
         */
        return (edx & MWAIT_EDX_C1);
}

bool c1e_detected;
EXPORT_SYMBOL(c1e_detected);

static cpumask_var_t c1e_mask;

void c1e_remove_cpu(int cpu)
{
        if (c1e_mask != NULL)
                cpumask_clear_cpu(cpu, c1e_mask);
}

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop).
 */
static void c1e_idle(void)
{
        if (need_resched())
                return;

        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = true;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, c1e_mask)) {
                        cpumask_set_cpu(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere.
                         */
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}
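/*
 * A (hypothetical) sketch of how code would swap in its own idle
 * routine under the pm_idle/cpu_idle_wait() contract documented above;
 * my_idle() is illustrative only:
 *
 *      static void my_idle(void)
 *      {
 *              // wait for work, then return to the idle loop
 *      }
 *
 *      pm_idle = my_idle;
 *      cpu_idle_wait();        // no CPU still runs the old handler
 *
 * The pointer must be updated before cpu_idle_wait() is called, as its
 * kerneldoc requires.
 */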
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
                            " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => all CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (cpu_has_amd_erratum(amd_erratum_400)) {
                /* E400: APIC timer interrupt does not wake up CPU from C1e */
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}

void __init init_c1e_mask(void)
{
        /* If we're using c1e_idle, we need to allocate c1e_mask. */
        if (pm_idle == c1e_idle)
                zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
                boot_option_idle_override = IDLE_POLL;
        } else if (!strcmp(str, "mwait")) {
                boot_option_idle_override = IDLE_FORCE_MWAIT;
        } else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option idle=halt is used, halt is
                 * forced for CPU idle and the C2/C3 states won't be
                 * used again.
                 * To allow the CPU idle driver to still be loaded,
                 * boot_option_idle_override is left alone by the driver.
                 */
                pm_idle = default_idle;
                boot_option_idle_override = IDLE_HALT;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * With the boot option idle=nomwait, mwait is disabled
                 * for the CPU C2/C3 states. The variable
                 * boot_option_idle_override is not touched further.
                 */
                boot_option_idle_override = IDLE_NOMWAIT;
        } else
                return -1;

        return 0;
}
early_param("idle", idle_setup);

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
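/*
 * Kernel command line examples handled by idle_setup() above:
 *
 *      idle=poll       - busy-wait in poll_idle(), lowest wakeup latency
 *      idle=mwait      - force MWAIT even where mwait_usable() says no
 *      idle=halt       - always use HLT via default_idle()
 *      idle=nomwait    - keep MWAIT out of the C2/C3 states
 */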