xref: /openbmc/linux/kernel/debug/debug_core.c (revision eea9507a)
1 /*
2  * Kernel Debug Core
3  *
4  * Maintainer: Jason Wessel <jason.wessel@windriver.com>
5  *
6  * Copyright (C) 2000-2001 VERITAS Software Corporation.
7  * Copyright (C) 2002-2004 Timesys Corporation
8  * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
9  * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
10  * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
11  * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
12  * Copyright (C) 2005-2009 Wind River Systems, Inc.
13  * Copyright (C) 2007 MontaVista Software, Inc.
14  * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
15  *
16  * Contributors at various stages not listed above:
17  *  Jason Wessel ( jason.wessel@windriver.com )
18  *  George Anzinger <george@mvista.com>
19  *  Anurekh Saxena (anurekh.saxena@timesys.com)
20  *  Lake Stevens Instrument Division (Glenn Engel)
21  *  Jim Kingdon, Cygnus Support.
22  *
23  * Original KGDB stub: David Grothe <dave@gcom.com>,
24  * Tigran Aivazian <tigran@sco.com>
25  *
26  * This file is licensed under the terms of the GNU General Public License
27  * version 2. This program is licensed "as is" without any warranty of any
28  * kind, whether express or implied.
29  */
30 #include <linux/pid_namespace.h>
31 #include <linux/clocksource.h>
32 #include <linux/serial_core.h>
33 #include <linux/interrupt.h>
34 #include <linux/spinlock.h>
35 #include <linux/console.h>
36 #include <linux/threads.h>
37 #include <linux/uaccess.h>
38 #include <linux/kernel.h>
39 #include <linux/module.h>
40 #include <linux/ptrace.h>
41 #include <linux/string.h>
42 #include <linux/delay.h>
43 #include <linux/sched.h>
44 #include <linux/sysrq.h>
45 #include <linux/reboot.h>
46 #include <linux/init.h>
47 #include <linux/kgdb.h>
48 #include <linux/kdb.h>
49 #include <linux/pid.h>
50 #include <linux/smp.h>
51 #include <linux/mm.h>
52 #include <linux/vmacache.h>
53 #include <linux/rcupdate.h>
54 
55 #include <asm/cacheflush.h>
56 #include <asm/byteorder.h>
57 #include <linux/atomic.h>
58 
59 #include "debug_core.h"
60 
61 static int kgdb_break_asap;
62 
63 struct debuggerinfo_struct kgdb_info[NR_CPUS];
64 
65 /**
66  * kgdb_connected - Is a host GDB connected to us?
67  */
68 int				kgdb_connected;
69 EXPORT_SYMBOL_GPL(kgdb_connected);
70 
71 /* All the KGDB handlers are installed */
72 int			kgdb_io_module_registered;
73 
74 /* Guard for recursive entry */
75 static int			exception_level;
76 
77 struct kgdb_io		*dbg_io_ops;
78 static DEFINE_SPINLOCK(kgdb_registration_lock);
79 
80 /* Action for the reboot notifier; a global so kdb can change it */
81 static int kgdbreboot;
82 /* kgdb console driver is loaded */
83 static int kgdb_con_registered;
84 /* determine if kgdb console output should be used */
85 static int kgdb_use_con;
86 /* Flag for alternate operations for early debugging */
87 bool dbg_is_early = true;
88 /* Next CPU to become the master debug CPU */
89 int dbg_switch_cpu;
90 
91 /* Use kdb or gdbserver mode */
92 int dbg_kdb_mode = 1;
93 
94 static int __init opt_kgdb_con(char *str)
95 {
96 	kgdb_use_con = 1;
97 	return 0;
98 }
99 
100 early_param("kgdbcon", opt_kgdb_con);
101 
102 module_param(kgdb_use_con, int, 0644);
103 module_param(kgdbreboot, int, 0644);
104 
105 /*
106  * Holds information about breakpoints in the kernel. These breakpoints are
107  * added and removed by gdb.
108  */
109 static struct kgdb_bkpt		kgdb_break[KGDB_MAX_BREAKPOINTS] = {
110 	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
111 };
112 
113 /*
114  * The CPU# of the active CPU, or -1 if none:
115  */
116 atomic_t			kgdb_active = ATOMIC_INIT(-1);
117 EXPORT_SYMBOL_GPL(kgdb_active);
118 static DEFINE_RAW_SPINLOCK(dbg_master_lock);
119 static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
120 
121 /*
122  * We use NR_CPUS, not PERCPU, in case kgdb is used to debug early
123  * bootup code (which might not have percpu set up yet):
124  */
125 static atomic_t			masters_in_kgdb;
126 static atomic_t			slaves_in_kgdb;
127 static atomic_t			kgdb_break_tasklet_var;
128 atomic_t			kgdb_setting_breakpoint;
129 
130 struct task_struct		*kgdb_usethread;
131 struct task_struct		*kgdb_contthread;
132 
133 int				kgdb_single_step;
134 static pid_t			kgdb_sstep_pid;
135 
136 /* To keep track of the CPU which is doing the single stepping */
137 atomic_t			kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
138 
139 /*
140  * If you are debugging a problem where roundup (the collection of
141  * all other CPUs) is itself the problem [this should be extremely
142  * rare], then use the nokgdbroundup option to skip the roundup. In
143  * that case the other CPUs keep running and might interfere with your
144  * debugging context, so use this with care:
145  */
146 static int kgdb_do_roundup = 1;
147 
148 static int __init opt_nokgdbroundup(char *str)
149 {
150 	kgdb_do_roundup = 0;
151 
152 	return 0;
153 }
154 
155 early_param("nokgdbroundup", opt_nokgdbroundup);
156 
157 /*
158  * Finally, some KGDB code :-)
159  */
160 
161 /*
162  * Weak aliases for breakpoint management,
163  * can be overridden by architectures when needed:
164  */
165 int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
166 {
167 	int err;
168 
169 	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
170 				BREAK_INSTR_SIZE);
171 	if (err)
172 		return err;
173 	err = probe_kernel_write((char *)bpt->bpt_addr,
174 				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
175 	return err;
176 }
177 
178 int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
179 {
180 	return probe_kernel_write((char *)bpt->bpt_addr,
181 				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
182 }
183 
184 int __weak kgdb_validate_break_address(unsigned long addr)
185 {
186 	struct kgdb_bkpt tmp;
187 	int err;
188 	/* Validate setting the breakpoint and then removing it.  If the
189 	 * remove fails, the kernel needs to emit a critical error message
190 	 * because we are in deep trouble, unable to put things back the way
191 	 * we found them.
192 	 */
193 	tmp.bpt_addr = addr;
194 	err = kgdb_arch_set_breakpoint(&tmp);
195 	if (err)
196 		return err;
197 	err = kgdb_arch_remove_breakpoint(&tmp);
198 	if (err)
199 		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
200 		   "memory destroyed at: %lx\n", addr);
201 	return err;
202 }
203 
204 unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
205 {
206 	return instruction_pointer(regs);
207 }
208 
209 int __weak kgdb_arch_init(void)
210 {
211 	return 0;
212 }
213 
214 int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
215 {
216 	return 0;
217 }
218 
219 /*
220  * Some architectures need cache flushes when we set/clear a
221  * breakpoint:
222  */
223 static void kgdb_flush_swbreak_addr(unsigned long addr)
224 {
225 	if (!CACHE_FLUSH_IS_SAFE)
226 		return;
227 
228 	if (current->mm) {
229 		int i;
230 
231 		for (i = 0; i < VMACACHE_SIZE; i++) {
232 			if (!current->vmacache[i])
233 				continue;
234 			flush_cache_range(current->vmacache[i],
235 					  addr, addr + BREAK_INSTR_SIZE);
236 		}
237 	}
238 
239 	/* Force flush the instruction cache in case the address was outside the mm */
240 	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
241 }
242 
243 /*
244  * SW breakpoint management:
245  */
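/*
 * Write the architecture breakpoint instruction into the text for every
 * breakpoint gdb has requested (state BP_SET), flush the caches and mark
 * it BP_ACTIVE.  Returns 0, or the last error encountered.
 */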
246 int dbg_activate_sw_breakpoints(void)
247 {
248 	int error;
249 	int ret = 0;
250 	int i;
251 
252 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
253 		if (kgdb_break[i].state != BP_SET)
254 			continue;
255 
256 		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
257 		if (error) {
258 			ret = error;
259 			printk(KERN_INFO "KGDB: BP install failed: %lx\n",
260 			       kgdb_break[i].bpt_addr);
261 			continue;
262 		}
263 
264 		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
265 		kgdb_break[i].state = BP_ACTIVE;
266 	}
267 	return ret;
268 }
269 
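/*
 * Record a new software breakpoint at @addr in the kgdb_break table.
 * The breakpoint instruction itself is written into memory later by
 * dbg_activate_sw_breakpoints().
 */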
270 int dbg_set_sw_break(unsigned long addr)
271 {
272 	int err = kgdb_validate_break_address(addr);
273 	int breakno = -1;
274 	int i;
275 
276 	if (err)
277 		return err;
278 
279 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
280 		if ((kgdb_break[i].state == BP_SET) &&
281 					(kgdb_break[i].bpt_addr == addr))
282 			return -EEXIST;
283 	}
284 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
285 		if (kgdb_break[i].state == BP_REMOVED &&
286 					kgdb_break[i].bpt_addr == addr) {
287 			breakno = i;
288 			break;
289 		}
290 	}
291 
292 	if (breakno == -1) {
293 		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
294 			if (kgdb_break[i].state == BP_UNDEFINED) {
295 				breakno = i;
296 				break;
297 			}
298 		}
299 	}
300 
301 	if (breakno == -1)
302 		return -E2BIG;
303 
304 	kgdb_break[breakno].state = BP_SET;
305 	kgdb_break[breakno].type = BP_BREAKPOINT;
306 	kgdb_break[breakno].bpt_addr = addr;
307 
308 	return 0;
309 }
310 
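/*
 * Restore the original instruction for every BP_ACTIVE breakpoint and
 * drop it back to BP_SET so it can be re-installed by
 * dbg_activate_sw_breakpoints() before the system is resumed.
 */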
311 int dbg_deactivate_sw_breakpoints(void)
312 {
313 	int error;
314 	int ret = 0;
315 	int i;
316 
317 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
318 		if (kgdb_break[i].state != BP_ACTIVE)
319 			continue;
320 		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
321 		if (error) {
322 			printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
323 			       kgdb_break[i].bpt_addr);
324 			ret = error;
325 		}
326 
327 		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
328 		kgdb_break[i].state = BP_SET;
329 	}
330 	return ret;
331 }
332 
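/*
 * Mark the breakpoint at @addr as BP_REMOVED so it is no longer armed by
 * dbg_activate_sw_breakpoints().  Only breakpoints in the BP_SET state
 * (i.e. currently deactivated) can be removed.
 */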
333 int dbg_remove_sw_break(unsigned long addr)
334 {
335 	int i;
336 
337 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
338 		if ((kgdb_break[i].state == BP_SET) &&
339 				(kgdb_break[i].bpt_addr == addr)) {
340 			kgdb_break[i].state = BP_REMOVED;
341 			return 0;
342 		}
343 	}
344 	return -ENOENT;
345 }
346 
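/* Return 1 if a breakpoint at @addr was set earlier but has since been removed. */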
347 int kgdb_isremovedbreak(unsigned long addr)
348 {
349 	int i;
350 
351 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
352 		if ((kgdb_break[i].state == BP_REMOVED) &&
353 					(kgdb_break[i].bpt_addr == addr))
354 			return 1;
355 	}
356 	return 0;
357 }
358 
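/*
 * Forget every software breakpoint (restoring the original instructions
 * for any that are still active) and clear all hardware breakpoints.
 */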
359 int dbg_remove_all_break(void)
360 {
361 	int error;
362 	int i;
363 
364 	/* Clear memory breakpoints. */
365 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
366 		if (kgdb_break[i].state != BP_ACTIVE)
367 			goto setundefined;
368 		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
369 		if (error)
370 			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
371 			       kgdb_break[i].bpt_addr);
372 setundefined:
373 		kgdb_break[i].state = BP_UNDEFINED;
374 	}
375 
376 	/* Clear hardware breakpoints. */
377 	if (arch_kgdb_ops.remove_all_hw_break)
378 		arch_kgdb_ops.remove_all_hw_break();
379 
380 	return 0;
381 }
382 
383 /*
384  * Return true if there is a valid kgdb I/O module.  Also if no
385  * debugger is attached a message can be printed to the console about
386  * waiting for the debugger to attach.
387  *
388  * The print_wait argument is only to be true when called from inside
389  * the core kgdb_handle_exception, because it will wait for the
390  * debugger to attach.
391  */
392 static int kgdb_io_ready(int print_wait)
393 {
394 	if (!dbg_io_ops)
395 		return 0;
396 	if (kgdb_connected)
397 		return 1;
398 	if (atomic_read(&kgdb_setting_breakpoint))
399 		return 1;
400 	if (print_wait) {
401 #ifdef CONFIG_KGDB_KDB
402 		if (!dbg_kdb_mode)
403 			printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
404 #else
405 		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
406 #endif
407 	}
408 	return 1;
409 }
410 
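/*
 * Detect re-entry into the debugger on the CPU that already owns it.
 * Recovers when the cause is a breakpoint planted where KGDB itself needs
 * to run; otherwise all breakpoints are killed and, failing that, the
 * kernel panics.
 */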
411 static int kgdb_reenter_check(struct kgdb_state *ks)
412 {
413 	unsigned long addr;
414 
415 	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
416 		return 0;
417 
418 	/* Panic on recursive debugger calls: */
419 	exception_level++;
420 	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
421 	dbg_deactivate_sw_breakpoints();
422 
423 	/*
424 	 * If the breakpoint was removed successfully at the place the
425 	 * exception occurred, try to recover and print a warning to the end
426 	 * user because the user planted a breakpoint in a place that
427 	 * KGDB needs in order to function.
428 	 */
429 	if (dbg_remove_sw_break(addr) == 0) {
430 		exception_level = 0;
431 		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
432 		dbg_activate_sw_breakpoints();
433 		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
434 			addr);
435 		WARN_ON_ONCE(1);
436 
437 		return 1;
438 	}
439 	dbg_remove_all_break();
440 	kgdb_skipexception(ks->ex_vector, ks->linux_regs);
441 
442 	if (exception_level > 1) {
443 		dump_stack();
444 		panic("Recursive entry to debugger");
445 	}
446 
447 	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
448 #ifdef CONFIG_KGDB_KDB
449 	/* Allow kdb to debug itself one level */
450 	return 0;
451 #endif
452 	dump_stack();
453 	panic("Recursive entry to debugger");
454 
455 	return 1;
456 }
457 
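/*
 * Keep the soft lockup, clocksource and RCU stall watchdogs from firing
 * because of the time spent stopped in the debugger.
 */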
458 static void dbg_touch_watchdogs(void)
459 {
460 	touch_softlockup_watchdog_sync();
461 	clocksource_touch_watchdog();
462 	rcu_cpu_stall_reset();
463 }
464 
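/*
 * Main debugger entry for a CPU.  Masters (DCPU_WANT_MASTER) compete for
 * dbg_master_lock and then run the kdb/gdbstub loop; slaves
 * (DCPU_IS_SLAVE) spin until the master releases dbg_slave_lock.
 */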
465 static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
466 		int exception_state)
467 {
468 	unsigned long flags;
469 	int sstep_tries = 100;
470 	int error;
471 	int cpu;
472 	int trace_on = 0;
473 	int online_cpus = num_online_cpus();
474 
475 	kgdb_info[ks->cpu].enter_kgdb++;
476 	kgdb_info[ks->cpu].exception_state |= exception_state;
477 
478 	if (exception_state == DCPU_WANT_MASTER)
479 		atomic_inc(&masters_in_kgdb);
480 	else
481 		atomic_inc(&slaves_in_kgdb);
482 
483 	if (arch_kgdb_ops.disable_hw_break)
484 		arch_kgdb_ops.disable_hw_break(regs);
485 
486 acquirelock:
487 	/*
488 	 * Interrupts will be restored by the 'trap return' code, except when
489 	 * single stepping.
490 	 */
491 	local_irq_save(flags);
492 
493 	cpu = ks->cpu;
494 	kgdb_info[cpu].debuggerinfo = regs;
495 	kgdb_info[cpu].task = current;
496 	kgdb_info[cpu].ret_state = 0;
497 	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
498 
499 	/* Make sure the above info reaches the primary CPU */
500 	smp_mb();
501 
502 	if (exception_level == 1) {
503 		if (raw_spin_trylock(&dbg_master_lock))
504 			atomic_xchg(&kgdb_active, cpu);
505 		goto cpu_master_loop;
506 	}
507 
508 	/*
509 	 * The CPU will loop if it is a slave, or until it can become the
510 	 * kgdb master CPU and acquire the kgdb_active lock:
511 	 */
512 	while (1) {
513 cpu_loop:
514 		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
515 			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
516 			goto cpu_master_loop;
517 		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
518 			if (raw_spin_trylock(&dbg_master_lock)) {
519 				atomic_xchg(&kgdb_active, cpu);
520 				break;
521 			}
522 		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
523 			if (!raw_spin_is_locked(&dbg_slave_lock))
524 				goto return_normal;
525 		} else {
526 return_normal:
527 			/* Return to normal operation by executing any
528 			 * hw breakpoint fixup.
529 			 */
530 			if (arch_kgdb_ops.correct_hw_break)
531 				arch_kgdb_ops.correct_hw_break();
532 			if (trace_on)
533 				tracing_on();
534 			kgdb_info[cpu].exception_state &=
535 				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
536 			kgdb_info[cpu].enter_kgdb--;
537 			smp_mb__before_atomic();
538 			atomic_dec(&slaves_in_kgdb);
539 			dbg_touch_watchdogs();
540 			local_irq_restore(flags);
541 			return 0;
542 		}
543 		cpu_relax();
544 	}
545 
546 	/*
547 	 * For single stepping, try to only enter on the processor
548 	 * that was single stepping.  To guard against a deadlock, the
549 	 * kernel will only try for the value of sstep_tries before
550 	 * giving up and continuing on.
551 	 */
552 	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
553 	    (kgdb_info[cpu].task &&
554 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
555 		atomic_set(&kgdb_active, -1);
556 		raw_spin_unlock(&dbg_master_lock);
557 		dbg_touch_watchdogs();
558 		local_irq_restore(flags);
559 
560 		goto acquirelock;
561 	}
562 
563 	if (!kgdb_io_ready(1)) {
564 		kgdb_info[cpu].ret_state = 1;
565 		goto kgdb_restore; /* No I/O connection, resume the system */
566 	}
567 
568 	/*
569 	 * Don't enter if we have hit a removed breakpoint.
570 	 */
571 	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
572 		goto kgdb_restore;
573 
574 	/* Call the I/O driver's pre_exception routine */
575 	if (dbg_io_ops->pre_exception)
576 		dbg_io_ops->pre_exception();
577 
578 	/*
579 	 * Get the passive CPU lock which will hold all the non-primary
580 	 * CPUs in a spin state while the debugger is active
581 	 */
582 	if (!kgdb_single_step)
583 		raw_spin_lock(&dbg_slave_lock);
584 
585 #ifdef CONFIG_SMP
586 	/* If send_ready set, slaves are already waiting */
587 	if (ks->send_ready)
588 		atomic_set(ks->send_ready, 1);
589 
590 	/* Signal the other CPUs to enter kgdb_wait() */
591 	else if ((!kgdb_single_step) && kgdb_do_roundup)
592 		kgdb_roundup_cpus(flags);
593 #endif
594 
595 	/*
596 	 * Wait for the other CPUs to be notified and be waiting for us:
597 	 */
598 	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
599 				atomic_read(&slaves_in_kgdb)) != online_cpus)
600 		cpu_relax();
601 
602 	/*
603 	 * At this point the primary processor is completely
604 	 * in the debugger and all secondary CPUs are quiescent
605 	 */
606 	dbg_deactivate_sw_breakpoints();
607 	kgdb_single_step = 0;
608 	kgdb_contthread = current;
609 	exception_level = 0;
610 	trace_on = tracing_is_on();
611 	if (trace_on)
612 		tracing_off();
613 
614 	while (1) {
615 cpu_master_loop:
616 		if (dbg_kdb_mode) {
617 			kgdb_connected = 1;
618 			error = kdb_stub(ks);
619 			if (error == -1)
620 				continue;
621 			kgdb_connected = 0;
622 		} else {
623 			error = gdb_serial_stub(ks);
624 		}
625 
626 		if (error == DBG_PASS_EVENT) {
627 			dbg_kdb_mode = !dbg_kdb_mode;
628 		} else if (error == DBG_SWITCH_CPU_EVENT) {
629 			kgdb_info[dbg_switch_cpu].exception_state |=
630 				DCPU_NEXT_MASTER;
631 			goto cpu_loop;
632 		} else {
633 			kgdb_info[cpu].ret_state = error;
634 			break;
635 		}
636 	}
637 
638 	/* Call the I/O driver's post_exception routine */
639 	if (dbg_io_ops->post_exception)
640 		dbg_io_ops->post_exception();
641 
642 	if (!kgdb_single_step) {
643 		raw_spin_unlock(&dbg_slave_lock);
644 		/* Wait till all the CPUs have quit from the debugger. */
645 		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
646 			cpu_relax();
647 	}
648 
649 kgdb_restore:
650 	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
651 		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
652 		if (kgdb_info[sstep_cpu].task)
653 			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
654 		else
655 			kgdb_sstep_pid = 0;
656 	}
657 	if (arch_kgdb_ops.correct_hw_break)
658 		arch_kgdb_ops.correct_hw_break();
659 	if (trace_on)
660 		tracing_on();
661 
662 	kgdb_info[cpu].exception_state &=
663 		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
664 	kgdb_info[cpu].enter_kgdb--;
665 	smp_mb__before_atomic();
666 	atomic_dec(&masters_in_kgdb);
667 	/* Free kgdb_active */
668 	atomic_set(&kgdb_active, -1);
669 	raw_spin_unlock(&dbg_master_lock);
670 	dbg_touch_watchdogs();
671 	local_irq_restore(flags);
672 
673 	return kgdb_info[cpu].ret_state;
674 }
675 
676 /*
677  * kgdb_handle_exception() - main entry point from a kernel exception
678  *
679  * Locking hierarchy:
680  *	interface locks, if any (begin_session)
681  *	kgdb lock (kgdb_active)
682  */
683 int
684 kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
685 {
686 	struct kgdb_state kgdb_var;
687 	struct kgdb_state *ks = &kgdb_var;
688 	int ret = 0;
689 
690 	if (arch_kgdb_ops.enable_nmi)
691 		arch_kgdb_ops.enable_nmi(0);
692 
693 	memset(ks, 0, sizeof(struct kgdb_state));
694 	ks->cpu			= raw_smp_processor_id();
695 	ks->ex_vector		= evector;
696 	ks->signo		= signo;
697 	ks->err_code		= ecode;
698 	ks->linux_regs		= regs;
699 
700 	if (kgdb_reenter_check(ks))
701 		goto out; /* Ouch, double exception ! */
702 	if (kgdb_info[ks->cpu].enter_kgdb != 0)
703 		goto out;
704 
705 	ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
706 out:
707 	if (arch_kgdb_ops.enable_nmi)
708 		arch_kgdb_ops.enable_nmi(1);
709 	return ret;
710 }
711 
712 /*
713  * GDB places a breakpoint at this function to learn about dynamically
714  * loaded objects. It's not defined static so that only one instance with this
715  * name exists in the kernel.
716  */
717 
718 static int module_event(struct notifier_block *self, unsigned long val,
719 	void *data)
720 {
721 	return 0;
722 }
723 
724 static struct notifier_block dbg_module_load_nb = {
725 	.notifier_call	= module_event,
726 };
727 
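/*
 * Entry point for a CPU that was rounded up (e.g. via NMI) while another
 * CPU is the debug master.  Returns 0 if the CPU entered the debugger as
 * a slave, 1 if there was nothing to do.
 */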
728 int kgdb_nmicallback(int cpu, void *regs)
729 {
730 #ifdef CONFIG_SMP
731 	struct kgdb_state kgdb_var;
732 	struct kgdb_state *ks = &kgdb_var;
733 
734 	memset(ks, 0, sizeof(struct kgdb_state));
735 	ks->cpu			= cpu;
736 	ks->linux_regs		= regs;
737 
738 	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
739 			raw_spin_is_locked(&dbg_master_lock)) {
740 		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
741 		return 0;
742 	}
743 #endif
744 	return 1;
745 }
746 
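/*
 * Enter the debugger as the master CPU from NMI context.  @send_ready is
 * flagged once the master is in control; the caller is expected to have
 * rounded up the other CPUs already (see the send_ready handling in
 * kgdb_cpu_enter()).  Returns 0 on entry, 1 otherwise.
 */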
747 int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
748 							atomic_t *send_ready)
749 {
750 #ifdef CONFIG_SMP
751 	if (!kgdb_io_ready(0) || !send_ready)
752 		return 1;
753 
754 	if (kgdb_info[cpu].enter_kgdb == 0) {
755 		struct kgdb_state kgdb_var;
756 		struct kgdb_state *ks = &kgdb_var;
757 
758 		memset(ks, 0, sizeof(struct kgdb_state));
759 		ks->cpu			= cpu;
760 		ks->ex_vector		= trapnr;
761 		ks->signo		= SIGTRAP;
762 		ks->err_code		= err_code;
763 		ks->linux_regs		= regs;
764 		ks->send_ready		= send_ready;
765 		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
766 		return 0;
767 	}
768 #endif
769 	return 1;
770 }
771 
772 static void kgdb_console_write(struct console *co, const char *s,
773    unsigned count)
774 {
775 	unsigned long flags;
776 
777 	/* If we're debugging, or KGDB has not connected, don't try
778 	 * to print. */
779 	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
780 		return;
781 
782 	local_irq_save(flags);
783 	gdbstub_msg_write(s, count);
784 	local_irq_restore(flags);
785 }
786 
787 static struct console kgdbcons = {
788 	.name		= "kgdb",
789 	.write		= kgdb_console_write,
790 	.flags		= CON_PRINTBUFFER | CON_ENABLED,
791 	.index		= -1,
792 };
793 
794 #ifdef CONFIG_MAGIC_SYSRQ
795 static void sysrq_handle_dbg(int key)
796 {
797 	if (!dbg_io_ops) {
798 		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
799 		return;
800 	}
801 	if (!kgdb_connected) {
802 #ifdef CONFIG_KGDB_KDB
803 		if (!dbg_kdb_mode)
804 			printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
805 #else
806 		printk(KERN_CRIT "Entering KGDB\n");
807 #endif
808 	}
809 
810 	kgdb_breakpoint();
811 }
812 
813 static struct sysrq_key_op sysrq_dbg_op = {
814 	.handler	= sysrq_handle_dbg,
815 	.help_msg	= "debug(g)",
816 	.action_msg	= "DEBUG",
817 };
818 #endif
819 
820 static int kgdb_panic_event(struct notifier_block *self,
821 			    unsigned long val,
822 			    void *data)
823 {
824 	if (dbg_kdb_mode)
825 		kdb_printf("PANIC: %s\n", (char *)data);
826 	kgdb_breakpoint();
827 	return NOTIFY_DONE;
828 }
829 
830 static struct notifier_block kgdb_panic_event_nb = {
831        .notifier_call	= kgdb_panic_event,
832        .priority	= INT_MAX,
833 };
834 
835 void __weak kgdb_arch_late(void)
836 {
837 }
838 
839 void __init dbg_late_init(void)
840 {
841 	dbg_is_early = false;
842 	if (kgdb_io_module_registered)
843 		kgdb_arch_late();
844 	kdb_init(KDB_INIT_FULL);
845 }
846 
847 static int
848 dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
849 {
850 	/*
851 	 * Take the following action on reboot notify depending on value:
852 	 *    1 == Enter debugger
853 	 *    0 == [the default] detach the debug client
854 	 *   -1 == Do nothing... and use this until the board resets
855 	 */
856 	switch (kgdbreboot) {
857 	case 1:
858 		kgdb_breakpoint();
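		/* Fall through */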
859 	case -1:
860 		goto done;
861 	}
862 	if (!dbg_kdb_mode)
863 		gdbstub_exit(code);
864 done:
865 	return NOTIFY_DONE;
866 }
867 
868 static struct notifier_block dbg_reboot_notifier = {
869 	.notifier_call		= dbg_notify_reboot,
870 	.next			= NULL,
871 	.priority		= INT_MAX,
872 };
873 
874 static void kgdb_register_callbacks(void)
875 {
876 	if (!kgdb_io_module_registered) {
877 		kgdb_io_module_registered = 1;
878 		kgdb_arch_init();
879 		if (!dbg_is_early)
880 			kgdb_arch_late();
881 		register_module_notifier(&dbg_module_load_nb);
882 		register_reboot_notifier(&dbg_reboot_notifier);
883 		atomic_notifier_chain_register(&panic_notifier_list,
884 					       &kgdb_panic_event_nb);
885 #ifdef CONFIG_MAGIC_SYSRQ
886 		register_sysrq_key('g', &sysrq_dbg_op);
887 #endif
888 		if (kgdb_use_con && !kgdb_con_registered) {
889 			register_console(&kgdbcons);
890 			kgdb_con_registered = 1;
891 		}
892 	}
893 }
894 
895 static void kgdb_unregister_callbacks(void)
896 {
897 	/*
898 	 * When this routine is called KGDB should unregister from the
899 	 * panic handler and clean up, making sure it is not handling any
900 	 * break exceptions at the time.
901 	 */
902 	if (kgdb_io_module_registered) {
903 		kgdb_io_module_registered = 0;
904 		unregister_reboot_notifier(&dbg_reboot_notifier);
905 		unregister_module_notifier(&dbg_module_load_nb);
906 		atomic_notifier_chain_unregister(&panic_notifier_list,
907 					       &kgdb_panic_event_nb);
908 		kgdb_arch_exit();
909 #ifdef CONFIG_MAGIC_SYSRQ
910 		unregister_sysrq_key('g', &sysrq_dbg_op);
911 #endif
912 		if (kgdb_con_registered) {
913 			unregister_console(&kgdbcons);
914 			kgdb_con_registered = 0;
915 		}
916 	}
917 }
918 
919 /*
920  * There are times a tasklet needs to be used instead of a compiled-in
921  * breakpoint so as to cause an exception outside a kgdb I/O module,
922  * such as is the case with kgdboe, where calling a breakpoint in the
923  * I/O driver itself would be fatal.
924  */
925 static void kgdb_tasklet_bpt(unsigned long ing)
926 {
927 	kgdb_breakpoint();
928 	atomic_set(&kgdb_break_tasklet_var, 0);
929 }
930 
931 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
932 
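/*
 * Schedule the breakpoint tasklet above, unless a break is already
 * pending or the debugger is already active.
 */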
933 void kgdb_schedule_breakpoint(void)
934 {
935 	if (atomic_read(&kgdb_break_tasklet_var) ||
936 		atomic_read(&kgdb_active) != -1 ||
937 		atomic_read(&kgdb_setting_breakpoint))
938 		return;
939 	atomic_inc(&kgdb_break_tasklet_var);
940 	tasklet_schedule(&kgdb_tasklet_breakpoint);
941 }
942 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
943 
944 static void kgdb_initial_breakpoint(void)
945 {
946 	kgdb_break_asap = 0;
947 
948 	printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
949 	kgdb_breakpoint();
950 }
951 
952 /**
953  *	kgdb_register_io_module - register KGDB IO module
954  *	@new_dbg_io_ops: the io ops vector
955  *
956  *	Register it with the KGDB core.
957  */
958 int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
959 {
960 	int err;
961 
962 	spin_lock(&kgdb_registration_lock);
963 
964 	if (dbg_io_ops) {
965 		spin_unlock(&kgdb_registration_lock);
966 
967 		printk(KERN_ERR "kgdb: Another I/O driver is already "
968 				"registered with KGDB.\n");
969 		return -EBUSY;
970 	}
971 
972 	if (new_dbg_io_ops->init) {
973 		err = new_dbg_io_ops->init();
974 		if (err) {
975 			spin_unlock(&kgdb_registration_lock);
976 			return err;
977 		}
978 	}
979 
980 	dbg_io_ops = new_dbg_io_ops;
981 
982 	spin_unlock(&kgdb_registration_lock);
983 
984 	printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
985 	       new_dbg_io_ops->name);
986 
987 	/* Arm KGDB now. */
988 	kgdb_register_callbacks();
989 
990 	if (kgdb_break_asap)
991 		kgdb_initial_breakpoint();
992 
993 	return 0;
994 }
995 EXPORT_SYMBOL_GPL(kgdb_register_io_module);
996 
997 /**
998  *	kgdb_unregister_io_module - unregister KGDB IO module
999  *	@old_dbg_io_ops: the io ops vector
1000  *
1001  *	Unregister it with the KGDB core.
1002  */
1003 void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
1004 {
1005 	BUG_ON(kgdb_connected);
1006 
1007 	/*
1008 	 * KGDB is no longer able to communicate out, so
1009 	 * unregister our callbacks and reset state.
1010 	 */
1011 	kgdb_unregister_callbacks();
1012 
1013 	spin_lock(&kgdb_registration_lock);
1014 
1015 	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
1016 	dbg_io_ops = NULL;
1017 
1018 	spin_unlock(&kgdb_registration_lock);
1019 
1020 	printk(KERN_INFO
1021 		"kgdb: Unregistered I/O driver %s, debugger disabled.\n",
1022 		old_dbg_io_ops->name);
1023 }
1024 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
1025 
1026 int dbg_io_get_char(void)
1027 {
1028 	int ret = dbg_io_ops->read_char();
1029 	if (ret == NO_POLL_CHAR)
1030 		return -1;
1031 	if (!dbg_kdb_mode)
1032 		return ret;
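	/* kdb's line editor expects backspace (8) rather than DEL (127) */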
1033 	if (ret == 127)
1034 		return 8;
1035 	return ret;
1036 }
1037 
1038 /**
1039  * kgdb_breakpoint - generate breakpoint exception
1040  *
1041  * This function will generate a breakpoint exception.  It is used at the
1042  * beginning of a program to sync up with a debugger and can be used
1043  * otherwise as a quick means to stop program execution and "break" into
1044  * the debugger.
1045  */
1046 noinline void kgdb_breakpoint(void)
1047 {
1048 	atomic_inc(&kgdb_setting_breakpoint);
1049 	wmb(); /* Sync point before breakpoint */
1050 	arch_kgdb_breakpoint();
1051 	wmb(); /* Sync point after breakpoint */
1052 	atomic_dec(&kgdb_setting_breakpoint);
1053 }
1054 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
1055 
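/*
 * The "kgdbwait" boot parameter: break into the debugger as soon as an
 * I/O driver is registered (or immediately, if one already is).
 */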
1056 static int __init opt_kgdb_wait(char *str)
1057 {
1058 	kgdb_break_asap = 1;
1059 
1060 	kdb_init(KDB_INIT_EARLY);
1061 	if (kgdb_io_module_registered)
1062 		kgdb_initial_breakpoint();
1063 
1064 	return 0;
1065 }
1066 
1067 early_param("kgdbwait", opt_kgdb_wait);
1068