/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_user_enabled = 0;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*  */

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}
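
/*
 * Worked example of the coupling above: with the default
 * watchdog_thresh of 10 seconds, hard lockups are checked against a
 * 10 second window while soft lockups are only reported once a CPU has
 * gone 20 seconds without running the watchdog thread.
 */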

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
        return local_clock() >> 30LL; /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
        /*
         * convert watchdog_thresh from seconds to ns
         * the divide by 5 is to give hrtimer several chances (two
         * or three with the current relation between the soft
         * and hard thresholds) to increment before the
         * hardlockup detector generates a warning
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
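
/*
 * Worked example of the arithmetic above, with the defaults:
 * sample_period = (10 * 2) * (10^9 / 5) ns = 4 * 10^9 ns, i.e. the
 * hrtimer fires every 4 seconds. The perf NMI window is
 * watchdog_thresh (10s by default), so the hrtimer gets two or three
 * chances to increment hrtimer_interrupts within one hard lockup
 * window, as the comment in set_sample_period() describes.
 */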

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * this is done lockless
         * do we care if a 0 races with a timestamp?
         * all it means is the softlockup check starts one cycle later
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        if (watchdog_user_enabled) {
                unsigned cpu;

                for_each_present_cpu(cpu) {
                        if (per_cpu(watchdog_nmi_touch, cpu) != true)
                                per_cpu(watchdog_nmi_touch, cpu) = true;
                }
        }
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + get_softlockup_thresh()))
                return now - touch_ts;

        return 0;
}
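
/*
 * Detection summary: a hard lockup means hrtimer interrupts have
 * stopped on a CPU (typically because it is stuck with interrupts
 * disabled); it is caught from NMI context by noticing that
 * hrtimer_interrupts has not advanced between two perf NMIs
 * (is_hardlockup). A soft lockup means the high-priority per-cpu
 * watchdog thread is being starved of CPU time; it is caught by
 * noticing that watchdog_touch_ts has not been refreshed within the
 * soft threshold (is_softlockup).
 */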

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                                       struct perf_sample_data *data,
                                       struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /* check for a hardlockup
         * This is done by making sure our timer interrupt
         * is incrementing. The timer interrupt should have
         * fired multiple times before we overflowed. If it hasn't
         * then this is a good indication the cpu is stuck
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                if (hardlockup_panic)
                        panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /* check for a softlockup
         * This is done by making sure a high priority task is
         * being scheduled. The task touches the watchdog to
         * indicate it is getting cpu time. If it hasn't then
         * this is a good indication some task is hogging the cpu
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup, check to see if the host
                 * stopped the vm before we issue the warning
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        /* kick off the timer for the hardlockup detector */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* initialize timestamp */
        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
        __touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
        watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
        watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();
}
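
/*
 * How the pieces above fit together: watchdog_timer_fn() runs from the
 * hrtimer, increments hrtimer_interrupts and wakes the per-cpu watchdog
 * thread. watchdog_should_run() reports work pending whenever
 * hrtimer_interrupts has advanced past soft_lockup_hrtimer_cnt, and
 * watchdog() then records the new count and refreshes the soft lockup
 * timestamp. A CPU that cannot schedule the thread stops refreshing the
 * timestamp and eventually trips is_softlockup().
 */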

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
                                                 watchdog_overflow_callback, NULL);

        /* save cpu0 error for future comparison */
        if (cpu == 0 && IS_ERR(event))
                cpu0_err = PTR_ERR(event);

        if (!IS_ERR(event)) {
                /* only print for cpu0 or if different from cpu0 */
                if (cpu == 0 || cpu0_err)
                        pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }

        /* skip displaying the same error again */
        if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
                return PTR_ERR(event);

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warning("disabled (cpu%i): hardware events not enabled\n",
                           cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                       cpu, PTR_ERR(event));
        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        return;
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
        .store			= &softlockup_watchdog,
        .thread_should_run	= watchdog_should_run,
        .thread_fn		= watchdog,
        .thread_comm		= "watchdog/%u",
        .setup			= watchdog_enable,
        .cleanup		= watchdog_cleanup,
        .park			= watchdog_disable,
        .unpark			= watchdog_enable,
};

static void restart_watchdog_hrtimer(void *info)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
        int ret;

        /*
         * No need to cancel and restart hrtimer if it is currently executing
         * because it will reprogram itself with the new period now.
         * We should never see it unqueued here because we are running per-cpu
         * with interrupts disabled.
         */
        ret = hrtimer_try_to_cancel(hrtimer);
        if (ret == 1)
                hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                              HRTIMER_MODE_REL_PINNED);
}

static void update_timers(int cpu)
{
        struct call_single_data data = {.func = restart_watchdog_hrtimer};
        /*
         * Make sure that the perf event counter adapts to the new
         * sampling period. Updating the sampling period directly would
         * be much nicer but we do not have an API for that now so
         * let's use a big hammer.
         * Hrtimer will adopt the new period on the next tick but this
         * might be late already so we have to restart the timer as well.
         */
        watchdog_nmi_disable(cpu);
        __smp_call_function_single(cpu, &data, 1);
        watchdog_nmi_enable(cpu);
}

static void update_timers_all_cpus(void)
{
        int cpu;

        get_online_cpus();
        preempt_disable();
        for_each_online_cpu(cpu)
                update_timers(cpu);
        preempt_enable();
        put_online_cpus();
}
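
/*
 * Note on the thread model: the watchdog_threads descriptor above is
 * handed to the smpboot infrastructure below, which (roughly) spawns one
 * "watchdog/%u" thread per CPU, calls .setup when a thread starts on its
 * CPU, and calls .park/.unpark around CPU hot-unplug and hot-plug, so the
 * hrtimer and perf event follow CPU state without extra hotplug code here.
 */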

static int watchdog_enable_all_cpus(bool sample_period_changed)
{
        int err = 0;

        if (!watchdog_running) {
                err = smpboot_register_percpu_thread(&watchdog_threads);
                if (err)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
        } else if (sample_period_changed) {
                update_timers_all_cpus();
        }

        return err;
}

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
        if (watchdog_running) {
                watchdog_running = 0;
                smpboot_unregister_percpu_thread(&watchdog_threads);
        }
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old_thresh, old_enabled;
        static DEFINE_MUTEX(watchdog_proc_mutex);

        mutex_lock(&watchdog_proc_mutex);
        old_thresh = ACCESS_ONCE(watchdog_thresh);
        old_enabled = ACCESS_ONCE(watchdog_user_enabled);

        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (err || !write)
                goto out;

        set_sample_period();
        /*
         * Enabling an already-enabled watchdog, or disabling an
         * already-disabled one, must be a no-op. The 'watchdog_running'
         * check in the watchdog_*_all_cpus() functions takes care of this.
         */
        if (watchdog_user_enabled && watchdog_thresh)
                err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
        else
                watchdog_disable_all_cpus();

        /* Restore old values on failure */
        if (err) {
                watchdog_thresh = old_thresh;
                watchdog_user_enabled = old_enabled;
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        return err;
}
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
        set_sample_period();

        if (watchdog_user_enabled)
                watchdog_enable_all_cpus(false);
}
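
/*
 * Usage sketch, derived from the boot parameters and proc handler
 * defined above (sysctl names assume the usual kernel.* mapping of
 * /proc/sys/kernel entries):
 *
 *   nmi_watchdog=panic     boot: panic on hard lockup
 *   nmi_watchdog=0         boot: disable the watchdog
 *   nowatchdog             boot: disable the watchdog
 *   softlockup_panic=1     boot: panic on soft lockup
 *
 *   sysctl -w kernel.watchdog_thresh=20   raise the hard threshold to
 *                                         20s (soft threshold: 40s)
 *   sysctl -w kernel.nmi_watchdog=0       disable at runtime
 *
 * Both sysctl writes are handled by proc_dowatchdog() above.
 */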