/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( jason.wessel@windriver.com )
 *  George Anzinger <george@mvista.com>
 *  Anurekh Saxena (anurekh.saxena@timesys.com)
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#define pr_fmt(fmt) "KGDB: " fmt

#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/nmi.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/rcupdate.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int kgdb_io_module_registered;

/* Guard for recursive entry */
static int exception_level;

struct kgdb_io *dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* Action for the reboot notifier, a global so kdb can change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;
	return 0;
}

early_param("kgdbcon", opt_kgdb_con);

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);
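/*
 * Illustrative example (not part of the upstream file): the boot-time knobs
 * above are typically combined with an I/O driver parameter such as kgdboc,
 * which is provided by a separate module rather than this file. A minimal
 * sketch of a kernel command line that routes console output through the
 * debugger and waits for it to attach at early boot might look like:
 *
 *	kgdboc=ttyS0,115200 kgdbcon kgdbwait
 *
 * Here "kgdbcon" sets kgdb_use_con via the early_param() handler above, and
 * "kgdbwait" sets kgdb_break_asap via the handler at the end of this file;
 * the exact kgdboc argument format is an assumption belonging to that driver.
 */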
/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t masters_in_kgdb;
static atomic_t slaves_in_kgdb;
static atomic_t kgdb_break_tasklet_var;
atomic_t kgdb_setting_breakpoint;

struct task_struct *kgdb_usethread;
struct task_struct *kgdb_contthread;

int kgdb_single_step;
static pid_t kgdb_sstep_pid;

/* to keep track of the CPU which is doing the single stepping */
atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	return err;
}

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}

int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;
	/* Validate setting the breakpoint and then removing it.  If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble not being able to put things back the way we
	 * found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
		       addr);
	return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm) {
		int i;

		for (i = 0; i < VMACACHE_SIZE; i++) {
			if (!current->vmacache.vmas[i])
				continue;
			flush_cache_range(current->vmacache.vmas[i],
					  addr, addr + BREAK_INSTR_SIZE);
		}
	}

	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * SW breakpoint management:
 */
int dbg_activate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
		if (error) {
			ret = error;
			pr_info("BP install failed: %lx\n",
				kgdb_break[i].bpt_addr);
			continue;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}

int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
		    kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}

int dbg_deactivate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error) {
			pr_info("BP remove failed: %lx\n",
				kgdb_break[i].bpt_addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}

int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
		    (kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}

int dbg_remove_all_break(void)
{
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error)
			pr_err("breakpoint remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}

/*
 * Return true if there is a valid kgdb I/O module.  Also, if no
 * debugger is attached, a message can be printed to the console about
 * waiting for the debugger to attach.
 *
 * The print_wait argument is only to be true when called from inside
 * the core kgdb_handle_exception, because it will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("waiting... or $3#33 for KDB\n");
#else
		pr_crit("Waiting for remote debugger\n");
#endif
	}
	return 1;
}

static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed ok at the place the exception
	 * occurred, try to recover and print a warning to the end
	 * user because the user planted a breakpoint in a place that
	 * KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		pr_crit("re-enter error: breakpoint removed %lx\n", addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		panic("Recursive entry to debugger");
	}

	pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}

static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}

static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();
	u64 time_left;

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * CPU will loop if it is a slave or requests to become a kgdb
	 * master cpu and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPUs in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* If send_ready set, slaves are already waiting */
	if (ks->send_ready)
		atomic_set(ks->send_ready, 1);

	/* Signal the other CPUs to enter kgdb_wait() */
	else if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	time_left = MSEC_PER_SEC;
	while (kgdb_do_roundup && --time_left &&
	       (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
		   online_cpus)
		udelay(1000);
	if (!time_left)
		pr_crit("Timed out waiting for secondary CPUs.\n");

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);

	return kgdb_info[cpu].ret_state;
}

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;
	int ret = 0;

	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(0);
	/*
	 * Avoid entering the debugger if we were triggered due to an oops
	 * but panic_timeout indicates the system should automatically
	 * reboot on panic. We don't want to get stuck waiting for input
	 * on such systems, especially if it's "just" an oops.
	 */
	if (signo != SIGTRAP && panic_timeout)
		return 1;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu = raw_smp_processor_id();
	ks->ex_vector = evector;
	ks->signo = signo;
	ks->err_code = ecode;
	ks->linux_regs = regs;

	if (kgdb_reenter_check(ks))
		goto out; /* Ouch, double exception ! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		goto out;

	ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
out:
	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(1);
	return ret;
}

/*
 * GDB places a breakpoint at this function to learn about dynamically
 * loaded objects.
 */

static int module_event(struct notifier_block *self, unsigned long val,
	void *data)
{
	return 0;
}

static struct notifier_block dbg_module_load_nb = {
	.notifier_call	= module_event,
};

int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu = cpu;
	ks->linux_regs = regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
			raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}

int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
		   atomic_t *send_ready)
{
#ifdef CONFIG_SMP
	if (!kgdb_io_ready(0) || !send_ready)
		return 1;

	if (kgdb_info[cpu].enter_kgdb == 0) {
		struct kgdb_state kgdb_var;
		struct kgdb_state *ks = &kgdb_var;

		memset(ks, 0, sizeof(struct kgdb_state));
		ks->cpu = cpu;
		ks->ex_vector = trapnr;
		ks->signo = SIGTRAP;
		ks->err_code = err_code;
		ks->linux_regs = regs;
		ks->send_ready = send_ready;
		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
		return 0;
	}
#endif
	return 1;
}

static void kgdb_console_write(struct console *co, const char *s,
			       unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * and print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name		= "kgdb",
	.write		= kgdb_console_write,
	.flags		= CON_PRINTBUFFER | CON_ENABLED,
	.index		= -1,
};

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		pr_crit("ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("KGDB or $3#33 for KDB\n");
#else
		pr_crit("Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
	.handler	= sysrq_handle_dbg,
	.help_msg	= "debug(g)",
	.action_msg	= "DEBUG",
};
#endif

static int kgdb_panic_event(struct notifier_block *self,
			    unsigned long val,
			    void *data)
{
	/*
	 * Avoid entering the debugger if we were triggered due to a panic.
	 * We don't want to get stuck waiting for input from user in such case.
	 * panic_timeout indicates the system should automatically
	 * reboot on panic.
	 */
	if (panic_timeout)
		return NOTIFY_DONE;

	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", (char *)data);
	kgdb_breakpoint();
	return NOTIFY_DONE;
}

static struct notifier_block kgdb_panic_event_nb = {
	.notifier_call	= kgdb_panic_event,
	.priority	= INT_MAX,
};

void __weak kgdb_arch_late(void)
{
}

void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);
}

static int
dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
	/*
	 * Take the following action on reboot notify depending on value:
	 *    1 == Enter debugger
	 *    0 == [the default] detach debug client
	 *   -1 == Do nothing... and use this until the board resets
	 */
	switch (kgdbreboot) {
	case 1:
		kgdb_breakpoint();
	case -1:
		goto done;
	}
	if (!dbg_kdb_mode)
		gdbstub_exit(code);
done:
	return NOTIFY_DONE;
}

static struct notifier_block dbg_reboot_notifier = {
	.notifier_call	= dbg_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX,
};

static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		register_module_notifier(&dbg_module_load_nb);
		register_reboot_notifier(&dbg_reboot_notifier);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from the
	 * panic handler and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		unregister_reboot_notifier(&dbg_reboot_notifier);
		unregister_module_notifier(&dbg_module_load_nb);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &kgdb_panic_event_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}

/*
 * There are times a tasklet needs to be used instead of a compiled-in
 * breakpoint, so as to cause an exception outside a kgdb I/O module,
 * such as is the case with kgdboe, where calling a breakpoint in the
 * I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);

void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
		atomic_read(&kgdb_active) != -1 ||
		atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	pr_crit("Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

/**
 * kgdb_register_io_module - register KGDB IO module
 * @new_dbg_io_ops: the io ops vector
 *
 * Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	int err;

	spin_lock(&kgdb_registration_lock);

	if (dbg_io_ops) {
		spin_unlock(&kgdb_registration_lock);

		pr_err("Another I/O driver is already registered with KGDB\n");
		return -EBUSY;
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap)
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);

/**
 * kgdb_unregister_io_module - unregister KGDB IO module
 * @old_dbg_io_ops: the io ops vector
 *
 * Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	pr_info("Unregistered I/O driver %s, debugger disabled\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);

int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
	if (ret == 127)
		return 8;
	return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
noinline void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);

static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered)
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);
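
/*
 * Illustrative sketch only (not part of the upstream file): roughly how an
 * external I/O driver might hand its ops vector to kgdb_register_io_module()
 * above. The members shown (.name, .read_char, .init) are the ones this file
 * dereferences through dbg_io_ops; a real driver also supplies a character
 * output hook and lives in its own source file, both of which are assumptions
 * here rather than anything defined by debug_core.c.
 *
 *	static int my_dbg_init(void)
 *	{
 *		return 0;		// claim/configure the underlying port
 *	}
 *
 *	static int my_dbg_read_char(void)
 *	{
 *		return NO_POLL_CHAR;	// poll hardware; NO_POLL_CHAR if idle
 *	}
 *
 *	static struct kgdb_io my_dbg_io_ops = {
 *		.name		= "my_dbg",
 *		.read_char	= my_dbg_read_char,
 *		.init		= my_dbg_init,
 *	};
 *
 *	// Typically called from the driver's init path:
 *	//	err = kgdb_register_io_module(&my_dbg_io_ops);
 *	// and kgdb_unregister_io_module(&my_dbg_io_ops) on teardown.
 */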