xref: /openbmc/linux/arch/powerpc/kernel/kgdb.c (revision 2c6467d2)
/*
 * PowerPC backend to the KGDB stub.
 *
 * 1998 (c) Michael AK Tesch (tesch@cs.wisc.edu)
 * Copyright (C) 2003 Timesys Corporation.
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
 * PPC32 support restored by Vitaly Wool <vwool@ru.mvista.com> and
 * Sergei Shtylyov <sshtylyov@ru.mvista.com>
 * Copyright (C) 2007-2008 Wind River Systems, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/kdebug.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/debug.h>
#include <asm/code-patching.h>
#include <linux/slab.h>

/*
 * This table contains the mapping between PowerPC hardware trap types, and
 * signals, which are primarily what GDB understands.  GDB and the kernel
 * don't always agree on values, so we use constants taken from gdb-6.2.
 */
static struct hard_trap_info
{
	unsigned int tt;		/* Trap type code for powerpc */
	unsigned char signo;		/* Signal that we map this trap into */
} hard_trap_info[] = {
	{ 0x0100, 0x02 /* SIGINT */  },		/* system reset */
	{ 0x0200, 0x0b /* SIGSEGV */ },		/* machine check */
	{ 0x0300, 0x0b /* SIGSEGV */ },		/* data access */
	{ 0x0400, 0x0b /* SIGSEGV */ },		/* instruction access */
	{ 0x0500, 0x02 /* SIGINT */  },		/* external interrupt */
	{ 0x0600, 0x0a /* SIGBUS */  },		/* alignment */
	{ 0x0700, 0x05 /* SIGTRAP */ },		/* program check */
	{ 0x0800, 0x08 /* SIGFPE */  },		/* fp unavailable */
	{ 0x0900, 0x0e /* SIGALRM */ },		/* decrementer */
	{ 0x0c00, 0x14 /* SIGCHLD */ },		/* system call */
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	{ 0x2002, 0x05 /* SIGTRAP */ },		/* debug */
#if defined(CONFIG_FSL_BOOKE)
	{ 0x2010, 0x08 /* SIGFPE */  },		/* spe unavailable */
	{ 0x2020, 0x08 /* SIGFPE */  },		/* spe unavailable */
	{ 0x2030, 0x08 /* SIGFPE */  },		/* spe fp data */
	{ 0x2040, 0x08 /* SIGFPE */  },		/* spe fp data */
	{ 0x2050, 0x08 /* SIGFPE */  },		/* spe fp round */
	{ 0x2060, 0x0e /* SIGILL */  },		/* performance monitor */
	{ 0x2900, 0x08 /* SIGFPE */  },		/* apu unavailable */
	{ 0x3100, 0x0e /* SIGALRM */ },		/* fixed interval timer */
	{ 0x3200, 0x02 /* SIGINT */  }, 	/* watchdog */
#else /* ! CONFIG_FSL_BOOKE */
	{ 0x1000, 0x0e /* SIGALRM */ },		/* prog interval timer */
	{ 0x1010, 0x0e /* SIGALRM */ },		/* fixed interval timer */
	{ 0x1020, 0x02 /* SIGINT */  }, 	/* watchdog */
	{ 0x2010, 0x08 /* SIGFPE */  },		/* fp unavailable */
	{ 0x2020, 0x08 /* SIGFPE */  },		/* ap unavailable */
#endif
#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
	{ 0x0d00, 0x05 /* SIGTRAP */ },		/* single-step */
#if defined(CONFIG_PPC_8xx)
	{ 0x1000, 0x04 /* SIGILL */  },		/* software emulation */
#else /* ! CONFIG_PPC_8xx */
	{ 0x0f00, 0x04 /* SIGILL */  },		/* performance monitor */
	{ 0x0f20, 0x08 /* SIGFPE */  },		/* altivec unavailable */
	{ 0x1300, 0x05 /* SIGTRAP */ }, 	/* instruction address break */
#if defined(CONFIG_PPC64)
	{ 0x1200, 0x05 /* SIGILL */  },		/* system error */
	{ 0x1500, 0x04 /* SIGILL */  },		/* soft patch */
	{ 0x1600, 0x04 /* SIGILL */  },		/* maintenance */
	{ 0x1700, 0x08 /* SIGFPE */  },		/* altivec assist */
	{ 0x1800, 0x04 /* SIGILL */  },		/* thermal */
#else /* ! CONFIG_PPC64 */
	{ 0x1400, 0x02 /* SIGINT */  },		/* SMI */
	{ 0x1600, 0x08 /* SIGFPE */  },		/* altivec assist */
	{ 0x1700, 0x04 /* SIGILL */  },		/* TAU */
	{ 0x2000, 0x05 /* SIGTRAP */ },		/* run mode */
#endif
#endif
#endif
	{ 0x0000, 0x00 }			/* Must be last */
};
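
/*
 * Worked example of the mapping above: a program check (trap type 0x0700)
 * is reported to gdb as signal 5 (SIGTRAP), which is how gdb recognises a
 * breakpoint; any trap type without an entry falls through to SIGHUP in
 * computeSignal() below.
 */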

static int computeSignal(unsigned int tt)
{
	struct hard_trap_info *ht;

	for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
		if (ht->tt == tt)
			return ht->signo;

	return SIGHUP;		/* default for things we don't know about */
}

/**
 *
 *	kgdb_skipexception - Bail out of KGDB when we've been triggered.
 *	@exception: Exception vector number
 *	@regs: Current &struct pt_regs.
 *
 *	On some architectures we need to skip a breakpoint exception when
 *	it occurs after a breakpoint has been removed.
 *
 */
int kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return kgdb_isremovedbreak(regs->nip);
}

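/*
 * SMP round-up: when one CPU drops into the debugger, kgdb_roundup_cpus()
 * below uses smp_send_debugger_break() to interrupt the other CPUs.  That
 * IPI lands here via the __debugger_ipi hook installed in kgdb_arch_init(),
 * and each CPU reports in to the kgdb core through kgdb_nmicallback().
 */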
static int kgdb_debugger_ipi(struct pt_regs *regs)
{
	kgdb_nmicallback(raw_smp_processor_id(), regs);
	return 0;
}

#ifdef CONFIG_SMP
void kgdb_roundup_cpus(void)
{
	smp_send_debugger_break();
}
#endif

/* KGDB functions to use existing PowerPC64 hooks. */
static int kgdb_debugger(struct pt_regs *regs)
{
	return !kgdb_handle_exception(1, computeSignal(TRAP(regs)),
				      DIE_OOPS, regs);
}

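/*
 * Breakpoint (trap instruction) hook, installed as __debugger_bpt.  Traps
 * taken in user mode are left to the normal signal path.  Kernel-mode traps
 * are handed to kgdb_handle_exception(); if the trapping instruction is the
 * compiled-in BREAK_INSTR, the NIP is stepped past it so execution can
 * resume once the debugger is done.
 */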
static int kgdb_handle_breakpoint(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;

	if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
		return 0;

	if (*(u32 *)regs->nip == BREAK_INSTR)
		regs->nip += BREAK_INSTR_SIZE;

	return 1;
}

static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info);
static int kgdb_singlestep(struct pt_regs *regs)
{
	struct thread_info *thread_info, *exception_thread_info;
	struct thread_info *backup_current_thread_info =
		this_cpu_ptr(&kgdb_thread_info);

	if (user_mode(regs))
		return 0;

	/*
	 * On Book E and perhaps other processors, singlestep is handled on
	 * the critical exception stack.  This causes current_thread_info()
	 * to fail, since it locates the thread_info by masking off
	 * the low bits of the current stack pointer.  We work around
	 * this issue by copying the thread_info from the kernel stack
	 * before calling kgdb_handle_exception, and copying it back
	 * afterwards.  On most processors the copy is avoided since
	 * exception_thread_info == thread_info.
	 */
	thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
	exception_thread_info = current_thread_info();

	if (thread_info != exception_thread_info) {
		/* Save the original current_thread_info. */
		memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info);
		memcpy(exception_thread_info, thread_info, sizeof *thread_info);
	}

	kgdb_handle_exception(0, SIGTRAP, 0, regs);

	if (thread_info != exception_thread_info)
		/* Restore the original current_thread_info. */
		memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);

	return 1;
}

static int kgdb_iabr_match(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;

	if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
		return 0;
	return 1;
}

static int kgdb_break_match(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;

	if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
		return 0;
	return 1;
}

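/*
 * Helpers for packing a gdb register image in sleeping_thread_to_gdb_regs()
 * below: PACK64 stores one long-sized register and advances the cursor by a
 * full long, while PACK32 stores a 32-bit value and advances the cursor by
 * only four bytes, presumably so that fields gdb treats as 32 bits wide
 * (ccr and xer here) occupy exactly 32 bits in the packed image.
 */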
#define PACK64(ptr, src) do { *(ptr++) = (src); } while (0)

#define PACK32(ptr, src) do {          \
	u32 *ptr32;                   \
	ptr32 = (u32 *)ptr;           \
	*(ptr32++) = (src);           \
	ptr = (unsigned long *)ptr32; \
	} while (0)

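/*
 * Build the gdb register image for a task that is not currently running.
 * Its saved pt_regs are found at p->thread.ksp + STACK_FRAME_OVERHEAD.
 * GPR3-GPR13 and the FP registers are not preserved there (see the comments
 * below), so those slots are left as the zeroes from the initial memset.
 */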
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
						  STACK_FRAME_OVERHEAD);
	unsigned long *ptr = gdb_regs;
	int reg;

	memset(gdb_regs, 0, NUMREGBYTES);

	/* Regs GPR0-2 */
	for (reg = 0; reg < 3; reg++)
		PACK64(ptr, regs->gpr[reg]);

	/* Regs GPR3-13 are caller saved, not in regs->gpr[] */
	ptr += 11;

	/* Regs GPR14-31 */
	for (reg = 14; reg < 32; reg++)
		PACK64(ptr, regs->gpr[reg]);

#ifdef CONFIG_FSL_BOOKE
#ifdef CONFIG_SPE
	for (reg = 0; reg < 32; reg++)
		PACK64(ptr, p->thread.evr[reg]);
#else
	ptr += 32;
#endif
#else
	/* fp registers not used by kernel, leave zero */
	ptr += 32 * 8 / sizeof(long);
#endif

	PACK64(ptr, regs->nip);
	PACK64(ptr, regs->msr);
	PACK32(ptr, regs->ccr);
	PACK64(ptr, regs->link);
	PACK64(ptr, regs->ctr);
	PACK32(ptr, regs->xer);

	BUG_ON((unsigned long)ptr >
	       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
}

#define GDB_SIZEOF_REG sizeof(unsigned long)
#define GDB_SIZEOF_REG_U32 sizeof(u32)

#ifdef CONFIG_FSL_BOOKE
#define GDB_SIZEOF_FLOAT_REG sizeof(unsigned long)
#else
#define GDB_SIZEOF_FLOAT_REG sizeof(u64)
#endif
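
/*
 * Per-register sizes used in dbg_reg_def[] below.  On FSL BookE the f0-f31
 * slots are long-sized, presumably because they are backed by the SPE evr[]
 * values rather than real FP registers (see dbg_get_reg()); everywhere else
 * gdb expects 64-bit FP registers, which the kernel never touches and which
 * therefore read back as zero.
 */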

struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[0]) },
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[1]) },
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[2]) },
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[3]) },
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[4]) },
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[5]) },
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[6]) },
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[7]) },
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[8]) },
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[9]) },
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[10]) },
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[11]) },
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[12]) },
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[13]) },
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[14]) },
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[15]) },
	{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[16]) },
	{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[17]) },
	{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[18]) },
	{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[19]) },
	{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[20]) },
	{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[21]) },
	{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[22]) },
	{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[23]) },
	{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[24]) },
	{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[25]) },
	{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[26]) },
	{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[27]) },
	{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[28]) },
	{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[29]) },
	{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[30]) },
	{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[31]) },

	{ "f0", GDB_SIZEOF_FLOAT_REG, 0 },
	{ "f1", GDB_SIZEOF_FLOAT_REG, 1 },
	{ "f2", GDB_SIZEOF_FLOAT_REG, 2 },
	{ "f3", GDB_SIZEOF_FLOAT_REG, 3 },
	{ "f4", GDB_SIZEOF_FLOAT_REG, 4 },
	{ "f5", GDB_SIZEOF_FLOAT_REG, 5 },
	{ "f6", GDB_SIZEOF_FLOAT_REG, 6 },
	{ "f7", GDB_SIZEOF_FLOAT_REG, 7 },
	{ "f8", GDB_SIZEOF_FLOAT_REG, 8 },
	{ "f9", GDB_SIZEOF_FLOAT_REG, 9 },
	{ "f10", GDB_SIZEOF_FLOAT_REG, 10 },
	{ "f11", GDB_SIZEOF_FLOAT_REG, 11 },
	{ "f12", GDB_SIZEOF_FLOAT_REG, 12 },
	{ "f13", GDB_SIZEOF_FLOAT_REG, 13 },
	{ "f14", GDB_SIZEOF_FLOAT_REG, 14 },
	{ "f15", GDB_SIZEOF_FLOAT_REG, 15 },
	{ "f16", GDB_SIZEOF_FLOAT_REG, 16 },
	{ "f17", GDB_SIZEOF_FLOAT_REG, 17 },
	{ "f18", GDB_SIZEOF_FLOAT_REG, 18 },
	{ "f19", GDB_SIZEOF_FLOAT_REG, 19 },
	{ "f20", GDB_SIZEOF_FLOAT_REG, 20 },
	{ "f21", GDB_SIZEOF_FLOAT_REG, 21 },
	{ "f22", GDB_SIZEOF_FLOAT_REG, 22 },
	{ "f23", GDB_SIZEOF_FLOAT_REG, 23 },
	{ "f24", GDB_SIZEOF_FLOAT_REG, 24 },
	{ "f25", GDB_SIZEOF_FLOAT_REG, 25 },
	{ "f26", GDB_SIZEOF_FLOAT_REG, 26 },
	{ "f27", GDB_SIZEOF_FLOAT_REG, 27 },
	{ "f28", GDB_SIZEOF_FLOAT_REG, 28 },
	{ "f29", GDB_SIZEOF_FLOAT_REG, 29 },
	{ "f30", GDB_SIZEOF_FLOAT_REG, 30 },
	{ "f31", GDB_SIZEOF_FLOAT_REG, 31 },

	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, nip) },
	{ "msr", GDB_SIZEOF_REG, offsetof(struct pt_regs, msr) },
	{ "cr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ccr) },
	{ "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, link) },
	{ "ctr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ctr) },
	{ "xer", GDB_SIZEOF_REG, offsetof(struct pt_regs, xer) },
};

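/*
 * dbg_get_reg()/dbg_set_reg() below use this table: register numbers 0-31
 * (the GPRs) and 64-69 (pc, msr, cr, lr, ctr, xer) are copied straight
 * to/from pt_regs using the offsets above, while numbers 32-63 are the FP
 * slots, which come from the SPE evr[] array on FSL BookE with SPE and are
 * otherwise reported as zero since the kernel does not use the FPU.
 */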
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (regno < 32 || regno >= 64)
		/* First 0 -> 31 gpr registers */
		/* pc, msr, cr, lr... registers 64 -> 69 */
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
				dbg_reg_def[regno].size);

	if (regno >= 32 && regno < 64) {
		/* FP registers 32 -> 63 */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
		if (current)
			memcpy(mem, &current->thread.evr[regno-32],
					dbg_reg_def[regno].size);
#else
		/* fp registers not used by kernel, leave zero */
		memset(mem, 0, dbg_reg_def[regno].size);
#endif
	}

	return dbg_reg_def[regno].name;
}

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return -EINVAL;

	if (regno < 32 || regno >= 64)
		/* First 0 -> 31 gpr registers */
		/* pc, msr, cr, lr... registers 64 -> 69 */
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
				dbg_reg_def[regno].size);

	if (regno >= 32 && regno < 64) {
		/* FP registers 32 -> 63 */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
		memcpy(&current->thread.evr[regno-32], mem,
				dbg_reg_def[regno].size);
#else
		/* fp registers not used by kernel, leave zero */
		return 0;
#endif
	}

	return 0;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->nip = pc;
}

/*
 * This function does PowerPC specific processing for interfacing to gdb.
 */
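/*
 * Only the gdb "continue" and "step" packets are handled here; anything
 * else returns -1 so the generic kgdb code deals with it.  A bare "c"
 * resumes at the current NIP, "cAA..AA" moves the NIP first, and "s"
 * additionally arms hardware single-step (MSR_SE, or DBCR0_IC plus MSR_DE
 * on CONFIG_PPC_ADV_DEBUG_REGS parts) before resuming.
 */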
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *linux_regs)
{
	char *ptr = &remcom_in_buffer[1];
	unsigned long addr;

	switch (remcom_in_buffer[0]) {
		/*
		 * sAA..AA   Step one instruction from AA..AA
		 * This will return an error to gdb ..
		 */
	case 's':
	case 'c':
		/* handle the optional parameter */
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->nip = addr;

		atomic_set(&kgdb_cpu_doing_single_step, -1);
		/* set the trace bit if we're stepping */
		if (remcom_in_buffer[0] == 's') {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			mtspr(SPRN_DBCR0,
			      mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
			linux_regs->msr |= MSR_DE;
#else
			linux_regs->msr |= MSR_SE;
#endif
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}
		return 0;
	}

	return -1;
}

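/*
 * Software breakpoints: kgdb_arch_set_breakpoint() saves the original
 * instruction in bpt->saved_instr and patches BREAK_INSTR into the text via
 * patch_instruction(); kgdb_arch_remove_breakpoint() patches the saved
 * instruction back.  probe_kernel_address() is used first so an unreadable
 * address fails cleanly instead of faulting inside the debugger.
 */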
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	unsigned int instr;
	unsigned int *addr = (unsigned int *)bpt->bpt_addr;

	err = probe_kernel_address(addr, instr);
	if (err)
		return err;

	err = patch_instruction(addr, BREAK_INSTR);
	if (err)
		return -EFAULT;

	*(unsigned int *)bpt->saved_instr = instr;

	return 0;
}

int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	unsigned int instr = *(unsigned int *)bpt->saved_instr;
	unsigned int *addr = (unsigned int *)bpt->bpt_addr;

	err = patch_instruction(addr, instr);
	if (err)
		return -EFAULT;

	return 0;
}

/*
 * Global data
 */
const struct kgdb_arch arch_kgdb_ops;

static int kgdb_not_implemented(struct pt_regs *regs)
{
	return 0;
}

static void *old__debugger_ipi;
static void *old__debugger;
static void *old__debugger_bpt;
static void *old__debugger_sstep;
static void *old__debugger_iabr_match;
static void *old__debugger_break_match;
static void *old__debugger_fault_handler;

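/*
 * kgdb_arch_init() saves the current low-level __debugger_* hooks and points
 * them at the kgdb handlers above, so breakpoints, single-step, the IABR
 * match, and the debugger IPI all enter kgdb; the fault handler is left as
 * kgdb_not_implemented().  kgdb_arch_exit() restores the saved hooks.
 */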
int kgdb_arch_init(void)
{
	old__debugger_ipi = __debugger_ipi;
	old__debugger = __debugger;
	old__debugger_bpt = __debugger_bpt;
	old__debugger_sstep = __debugger_sstep;
	old__debugger_iabr_match = __debugger_iabr_match;
	old__debugger_break_match = __debugger_break_match;
	old__debugger_fault_handler = __debugger_fault_handler;

	__debugger_ipi = kgdb_debugger_ipi;
	__debugger = kgdb_debugger;
	__debugger_bpt = kgdb_handle_breakpoint;
	__debugger_sstep = kgdb_singlestep;
	__debugger_iabr_match = kgdb_iabr_match;
	__debugger_break_match = kgdb_break_match;
	__debugger_fault_handler = kgdb_not_implemented;

	return 0;
}

void kgdb_arch_exit(void)
{
	__debugger_ipi = old__debugger_ipi;
	__debugger = old__debugger;
	__debugger_bpt = old__debugger_bpt;
	__debugger_sstep = old__debugger_sstep;
	__debugger_iabr_match = old__debugger_iabr_match;
	__debugger_break_match = old__debugger_break_match;
	__debugger_fault_handler = old__debugger_fault_handler;
}