arch/loongarch/kernel/kgdb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * LoongArch KGDB support
 *
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */

#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/objtool.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/hw_breakpoint.h>
#include <asm/inst.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>

int kgdb_watch_activated;
static unsigned int stepped_opcode;
static unsigned long stepped_address;

struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
	{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
	{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
	{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
	{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
	{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
	{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
	{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
	{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
	{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
	{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
	{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
	{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
	{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
	{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
	{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
	{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
	{ "orig_a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, orig_a0) },
	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_era) },
	{ "badv", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_badvaddr) },
	{ "f0", GDB_SIZEOF_REG, 0 },
	{ "f1", GDB_SIZEOF_REG, 1 },
	{ "f2", GDB_SIZEOF_REG, 2 },
	{ "f3", GDB_SIZEOF_REG, 3 },
	{ "f4", GDB_SIZEOF_REG, 4 },
	{ "f5", GDB_SIZEOF_REG, 5 },
	{ "f6", GDB_SIZEOF_REG, 6 },
	{ "f7", GDB_SIZEOF_REG, 7 },
	{ "f8", GDB_SIZEOF_REG, 8 },
	{ "f9", GDB_SIZEOF_REG, 9 },
	{ "f10", GDB_SIZEOF_REG, 10 },
	{ "f11", GDB_SIZEOF_REG, 11 },
	{ "f12", GDB_SIZEOF_REG, 12 },
	{ "f13", GDB_SIZEOF_REG, 13 },
	{ "f14", GDB_SIZEOF_REG, 14 },
	{ "f15", GDB_SIZEOF_REG, 15 },
	{ "f16", GDB_SIZEOF_REG, 16 },
	{ "f17", GDB_SIZEOF_REG, 17 },
	{ "f18", GDB_SIZEOF_REG, 18 },
	{ "f19", GDB_SIZEOF_REG, 19 },
	{ "f20", GDB_SIZEOF_REG, 20 },
	{ "f21", GDB_SIZEOF_REG, 21 },
	{ "f22", GDB_SIZEOF_REG, 22 },
	{ "f23", GDB_SIZEOF_REG, 23 },
	{ "f24", GDB_SIZEOF_REG, 24 },
	{ "f25", GDB_SIZEOF_REG, 25 },
	{ "f26", GDB_SIZEOF_REG, 26 },
	{ "f27", GDB_SIZEOF_REG, 27 },
	{ "f28", GDB_SIZEOF_REG, 28 },
	{ "f29", GDB_SIZEOF_REG, 29 },
	{ "f30", GDB_SIZEOF_REG, 30 },
	{ "f31", GDB_SIZEOF_REG, 31 },
	{ "fcc0", 1, 0 },
	{ "fcc1", 1, 1 },
	{ "fcc2", 1, 2 },
	{ "fcc3", 1, 3 },
	{ "fcc4", 1, 4 },
	{ "fcc5", 1, 5 },
	{ "fcc6", 1, 6 },
	{ "fcc7", 1, 7 },
	{ "fcsr", 4, 0 },
};

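/*
 * Note on the table above: for entries up to DBG_PT_REGS_END the third
 * field is a byte offset into struct pt_regs, while for the FP/FCC/FCSR
 * entries it is an index into the corresponding field of
 * current->thread.fpu, resolved by dbg_get_reg()/dbg_set_reg() below.
 */
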
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	int reg_offset, reg_size;

	if (regno < 0 || regno >= DBG_MAX_REG_NUM)
		return NULL;

	reg_offset = dbg_reg_def[regno].offset;
	reg_size = dbg_reg_def[regno].size;

	if (reg_offset == -1)
		goto out;

	/* Handle general-purpose/orig_a0/pc/badv registers */
	if (regno <= DBG_PT_REGS_END) {
		memcpy(mem, (void *)regs + reg_offset, reg_size);
		goto out;
	}

	if (!(regs->csr_euen & CSR_EUEN_FPEN))
		goto out;

	save_fp(current);

	/* Handle FP registers */
	switch (regno) {
	case DBG_FCSR:				/* Process the fcsr */
		memcpy(mem, (void *)&current->thread.fpu.fcsr, reg_size);
		break;
	case DBG_FCC_BASE ... DBG_FCC_END:	/* Process the fcc */
		memcpy(mem, (void *)&current->thread.fpu.fcc + reg_offset, reg_size);
		break;
	case DBG_FPR_BASE ... DBG_FPR_END:	/* Process the fpr */
		memcpy(mem, (void *)&current->thread.fpu.fpr[reg_offset], reg_size);
		break;
	default:
		break;
	}

out:
	return dbg_reg_def[regno].name;
}

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	int reg_offset, reg_size;

	if (regno < 0 || regno >= DBG_MAX_REG_NUM)
		return -EINVAL;

	reg_offset = dbg_reg_def[regno].offset;
	reg_size = dbg_reg_def[regno].size;

	if (reg_offset == -1)
		return 0;

	/* Handle general-purpose/orig_a0/pc/badv registers */
	if (regno <= DBG_PT_REGS_END) {
		memcpy((void *)regs + reg_offset, mem, reg_size);
		return 0;
	}

	if (!(regs->csr_euen & CSR_EUEN_FPEN))
		return 0;

	/* Handle FP registers */
	switch (regno) {
	case DBG_FCSR:				/* Process the fcsr */
		memcpy((void *)&current->thread.fpu.fcsr, mem, reg_size);
		break;
	case DBG_FCC_BASE ... DBG_FCC_END:	/* Process the fcc */
		memcpy((void *)&current->thread.fpu.fcc + reg_offset, mem, reg_size);
		break;
	case DBG_FPR_BASE ... DBG_FPR_END:	/* Process the fpr */
		memcpy((void *)&current->thread.fpu.fpr[reg_offset], mem, reg_size);
		break;
	default:
		break;
	}

	restore_fp(current);

	return 0;
}

/*
 * Similar to regs_to_gdb_regs(), except that the process is sleeping, so
 * we may not be able to get all of the info: only the callee-saved
 * registers (plus RA/TP/SP) survive __switch_to(); the rest read as zero.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	/* Initialize to zero */
	memset((char *)gdb_regs, 0, NUMREGBYTES);

	gdb_regs[DBG_LOONGARCH_RA] = p->thread.reg01;
	gdb_regs[DBG_LOONGARCH_TP] = (long)p;
	gdb_regs[DBG_LOONGARCH_SP] = p->thread.reg03;

	/* S0 - S8 */
	gdb_regs[DBG_LOONGARCH_S0] = p->thread.reg23;
	gdb_regs[DBG_LOONGARCH_S1] = p->thread.reg24;
	gdb_regs[DBG_LOONGARCH_S2] = p->thread.reg25;
	gdb_regs[DBG_LOONGARCH_S3] = p->thread.reg26;
	gdb_regs[DBG_LOONGARCH_S4] = p->thread.reg27;
	gdb_regs[DBG_LOONGARCH_S5] = p->thread.reg28;
	gdb_regs[DBG_LOONGARCH_S6] = p->thread.reg29;
	gdb_regs[DBG_LOONGARCH_S7] = p->thread.reg30;
	gdb_regs[DBG_LOONGARCH_S8] = p->thread.reg31;

	/*
	 * Use the return address (RA) for the PC, i.e. the moment just
	 * after returning from __switch_to().
	 */
	gdb_regs[DBG_LOONGARCH_PC] = p->thread.reg01;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->csr_era = pc;
}

noinline void arch_kgdb_breakpoint(void)
{
	__asm__ __volatile__ (
		".globl kgdb_breakinst\n\t"
		"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
}
STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);

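/*
 * The kgdb_breakinst label lets kgdb_loongarch_notify() recognize the
 * compiled-in breakpoint above and advance the saved PC past it when
 * the debugger resumes.
 */
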
/*
 * Called from the die notifier chain on traps and panics; if KGDB is
 * enabled, try to drop into the debugger.
 */
static int kgdb_loongarch_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;

	/* Userspace events, ignore. */
	if (user_mode(regs))
		return NOTIFY_DONE;

	if (!kgdb_io_module_registered)
		return NOTIFY_DONE;

	if (atomic_read(&kgdb_active) != -1)
		kgdb_nmicallback(smp_processor_id(), regs);

	if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
		return NOTIFY_DONE;

	if (atomic_read(&kgdb_setting_breakpoint))
		if (regs->csr_era == (unsigned long)&kgdb_breakinst)
			regs->csr_era += LOONGARCH_INSN_SIZE;

	return NOTIFY_STOP;
}

bool kgdb_breakpoint_handler(struct pt_regs *regs)
{
	struct die_args args = {
		.regs	= regs,
		.str	= "Break",
		.err	= BRK_KDB,
		.trapnr = read_csr_excode(),
		.signr	= SIGTRAP,
	};

	return kgdb_loongarch_notify(NULL, DIE_TRAP, &args) == NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_loongarch_notify,
};

static inline void kgdb_arch_update_addr(struct pt_regs *regs,
					 char *remcom_in_buffer)
{
	unsigned long addr;
	char *ptr;

	ptr = &remcom_in_buffer[1];
	if (kgdb_hex2long(&ptr, &addr))
		regs->csr_era = addr;
}

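/*
 * Remote protocol note: the 'c' (continue) and 's' (step) packets may
 * carry an optional resume address in hex, e.g. "c9000000001234";
 * kgdb_hex2long() parses it starting at buffer[1], and csr_era is left
 * untouched when no address is given.
 */
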
/* Calculate the target address after a single step */
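/*
 * Branch offsets below are the raw immediate shifted left by 2
 * (instructions are 4-byte aligned) and then sign-extended. For
 * example, a "b" with an all-ones 26-bit immediate (si_h:si_l) gives
 * sign_extend64(0x3ffffff << 2, 27) == -4, a branch to the previous
 * instruction.
 */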
static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
{
	char cj_val;
	unsigned int si, si_l, si_h, rd, rj, cj;
	unsigned long pc = instruction_pointer(regs);
	union loongarch_instruction *ip = (union loongarch_instruction *)pc;

	if (pc & 3) {
		pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
		return -EINVAL;
	}

	*next_addr = pc + LOONGARCH_INSN_SIZE;

	si_h = ip->reg0i26_format.immediate_h;
	si_l = ip->reg0i26_format.immediate_l;
	switch (ip->reg0i26_format.opcode) {
	case b_op:
		*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
		return 0;
	case bl_op:
		*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
		regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
		return 0;
	}

	rj = ip->reg1i21_format.rj;
	cj = (rj & 0x07) + DBG_FCC_BASE;
	si_l = ip->reg1i21_format.immediate_l;
	si_h = ip->reg1i21_format.immediate_h;
	dbg_get_reg(cj, &cj_val, regs);
	switch (ip->reg1i21_format.opcode) {
	case beqz_op:
		if (regs->regs[rj] == 0)
			*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
		return 0;
	case bnez_op:
		if (regs->regs[rj] != 0)
			*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
		return 0;
	case bceqz_op: /* bceqz_op == bcnez_op */
		if (((rj & 0x18) == 0x00) && !cj_val) /* bceqz */
			*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
		if (((rj & 0x18) == 0x08) && cj_val) /* bcnez */
			*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
		return 0;
	}

	rj = ip->reg2i16_format.rj;
	rd = ip->reg2i16_format.rd;
	si = ip->reg2i16_format.immediate;
	switch (ip->reg2i16_format.opcode) {
	case beq_op:
		if (regs->regs[rj] == regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case bne_op:
		if (regs->regs[rj] != regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case blt_op:
		if ((long)regs->regs[rj] < (long)regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case bge_op:
		if ((long)regs->regs[rj] >= (long)regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case bltu_op:
		if (regs->regs[rj] < regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case bgeu_op:
		if (regs->regs[rj] >= regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case jirl_op:
		/* Read rj before writing the link register: rd may alias rj */
		*next_addr = regs->regs[rj] + sign_extend64(si << 2, 17);
		regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
		return 0;
	}

	return 0;
}

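/*
 * Single-stepping is done in software: get_step_address() computes
 * where the PC will land, then do_single_step() plants a temporary
 * break instruction there, which undo_single_step() later removes.
 */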
static int do_single_step(struct pt_regs *regs)
{
	int error = 0;
	unsigned long addr = 0; /* The address the current instruction will branch or fall through to */

	error = get_step_address(regs, &addr);
	if (error)
		return error;

	/* Save the original opcode found at the step target address */
	error = get_kernel_nofault(stepped_opcode, (void *)addr);
	if (error)
		return error;

	stepped_address = addr;

	/* Replace the opcode with the break instruction */
	error = copy_to_kernel_nofault((void *)stepped_address,
				       arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);

	if (error) {
		stepped_opcode = 0;
		stepped_address = 0;
	} else {
		kgdb_single_step = 1;
		atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
	}

	return error;
}

/* Undo a single step */
static void undo_single_step(struct pt_regs *regs)
{
	if (stepped_opcode) {
		copy_to_kernel_nofault((void *)stepped_address,
				       (void *)&stepped_opcode, BREAK_INSTR_SIZE);
		flush_icache_range(stepped_address, stepped_address + BREAK_INSTR_SIZE);
	}

	stepped_opcode = 0;
	stepped_address = 0;
	kgdb_single_step = 0;
	atomic_set(&kgdb_cpu_doing_single_step, -1);
}

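/*
 * Dispatch on the first byte of the GDB remote packet: 'D' (detach) and
 * 'k' (kill) clear CSR_PRMD_PWE so watch exceptions stay off once the
 * debugger goes away; 'c' continues and 's' single-steps, each at an
 * optional new address.
 */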
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	int ret = 0;

	undo_single_step(regs);
	regs->csr_prmd |= CSR_PRMD_PWE;

	switch (remcom_in_buffer[0]) {
	case 'D':
	case 'k':
		regs->csr_prmd &= ~CSR_PRMD_PWE;
		fallthrough;
	case 'c':
		kgdb_arch_update_addr(regs, remcom_in_buffer);
		break;
	case 's':
		kgdb_arch_update_addr(regs, remcom_in_buffer);
		ret = do_single_step(regs);
		break;
	default:
		ret = -1;
	}

	return ret;
}

static struct hw_breakpoint {
	unsigned int		enabled;
	unsigned long		addr;
	int			len;
	int			type;
	struct perf_event	* __percpu *pev;
} breakinfo[LOONGARCH_MAX_BRP];

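/*
 * Each breakinfo slot is backed by one perf event per CPU (allocated by
 * register_wide_hw_breakpoint() in kgdb_arch_late()), so reserving a
 * slot must succeed on every online CPU or be rolled back entirely.
 */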
static int hw_break_reserve_slot(int breakno)
{
	int cpu, cnt = 0;
	struct perf_event **pevent;

	for_each_online_cpu(cpu) {
		cnt++;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		if (dbg_reserve_bp_slot(*pevent))
			goto fail;
	}

	return 0;

fail:
	for_each_online_cpu(cpu) {
		cnt--;
		if (!cnt)
			break;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		dbg_release_bp_slot(*pevent);
	}

	return -1;
}

static int hw_break_release_slot(int breakno)
{
	int cpu;
	struct perf_event **pevent;

	if (dbg_is_early)
		return 0;

	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		if (dbg_release_bp_slot(*pevent))
			/*
			 * The debugger is responsible for handling the retry on
			 * remove failure.
			 */
			return -1;
	}

	return 0;
}

static int kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
	int i;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++)
		if (!breakinfo[i].enabled)
			break;

	if (i == LOONGARCH_MAX_BRP)
		return -1;

	switch (bptype) {
	case BP_HARDWARE_BREAKPOINT:
		breakinfo[i].type = HW_BREAKPOINT_X;
		break;
	case BP_READ_WATCHPOINT:
		breakinfo[i].type = HW_BREAKPOINT_R;
		break;
	case BP_WRITE_WATCHPOINT:
		breakinfo[i].type = HW_BREAKPOINT_W;
		break;
	case BP_ACCESS_WATCHPOINT:
		breakinfo[i].type = HW_BREAKPOINT_RW;
		break;
	default:
		return -1;
	}

	switch (len) {
	case 1:
		breakinfo[i].len = HW_BREAKPOINT_LEN_1;
		break;
	case 2:
		breakinfo[i].len = HW_BREAKPOINT_LEN_2;
		break;
	case 4:
		breakinfo[i].len = HW_BREAKPOINT_LEN_4;
		break;
	case 8:
		breakinfo[i].len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -1;
	}

	breakinfo[i].addr = addr;
	if (hw_break_reserve_slot(i)) {
		breakinfo[i].addr = 0;
		return -1;
	}
	breakinfo[i].enabled = 1;

	return 0;
}

static int kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
	int i;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++)
		if (breakinfo[i].addr == addr && breakinfo[i].enabled)
			break;

	if (i == LOONGARCH_MAX_BRP)
		return -1;

	if (hw_break_release_slot(i)) {
		pr_err("Cannot remove hw breakpoint at %lx\n", addr);
		return -1;
	}
	breakinfo[i].enabled = 0;

	return 0;
}

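/*
 * Note: csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD) below clears the
 * CRMD.WE (watchpoint enable) bit, so hardware debug exceptions cannot
 * refire while kgdb owns the CPU.
 */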
static void kgdb_disable_hw_break(struct pt_regs *regs)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		if (!breakinfo[i].enabled)
			continue;

		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (bp->attr.disabled == 1)
			continue;

		arch_uninstall_hw_breakpoint(bp);
		bp->attr.disabled = 1;
	}

	/* Disable hardware debugging while we are in kgdb */
	csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
}

static void kgdb_remove_all_hw_break(void)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		if (!breakinfo[i].enabled)
			continue;

		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (!bp->attr.disabled) {
			arch_uninstall_hw_breakpoint(bp);
			bp->attr.disabled = 1;
			continue;
		}

		if (hw_break_release_slot(i))
			pr_err("KGDB: hw bpt remove failed %lx\n", breakinfo[i].addr);
		breakinfo[i].enabled = 0;
	}

	csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
	kgdb_watch_activated = 0;
}

static void kgdb_correct_hw_break(void)
{
	int i, activated = 0;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		struct perf_event *bp;
		int val;
		int cpu = raw_smp_processor_id();

		if (!breakinfo[i].enabled)
			continue;

		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (bp->attr.disabled != 1)
			continue;

		bp->attr.bp_addr = breakinfo[i].addr;
		bp->attr.bp_len = breakinfo[i].len;
		bp->attr.bp_type = breakinfo[i].type;

		val = hw_breakpoint_arch_parse(bp, &bp->attr, counter_arch_bp(bp));
		if (val)
			return;

		val = arch_install_hw_breakpoint(bp);
		if (!val)
			bp->attr.disabled = 0;
		activated = 1;
	}

	csr_xchg32(activated ? CSR_CRMD_WE : 0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
	kgdb_watch_activated = activated;
}

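/*
 * gdb_bpt_instr below spells out "break 2" (BRK_KDB) byte-by-byte in
 * little-endian order; assuming break_op is the 7-bit opcode 0x54,
 * break_op >> 1 in the third byte yields the instruction word
 * 0x002a0002.
 */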
const struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr		= {0x02, 0x00, break_op >> 1, 0x00}, /* BRK_KDB = 2 */
	.flags			= KGDB_HW_BREAKPOINT,
	.set_hw_breakpoint	= kgdb_set_hw_break,
	.remove_hw_breakpoint	= kgdb_remove_hw_break,
	.disable_hw_break	= kgdb_disable_hw_break,
	.remove_all_hw_break	= kgdb_remove_all_hw_break,
	.correct_hw_break	= kgdb_correct_hw_break,
};

int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}

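/*
 * The attr set up below is only a placeholder (a disabled write
 * watchpoint on kgdb_arch_init's address) so that per-CPU events get
 * allocated; the real addr/len/type are installed later by
 * kgdb_correct_hw_break().
 */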
void kgdb_arch_late(void)
{
	int i, cpu;
	struct perf_event_attr attr;
	struct perf_event **pevent;

	hw_breakpoint_init(&attr);

	attr.bp_addr = (unsigned long)kgdb_arch_init;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;
	attr.disabled = 1;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		if (breakinfo[i].pev)
			continue;

		breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
		if (IS_ERR((void * __force)breakinfo[i].pev)) {
			pr_err("kgdb: Could not allocate hw breakpoints.\n");
			breakinfo[i].pev = NULL;
			return;
		}

		for_each_online_cpu(cpu) {
			pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
			if (pevent[0]->destroy) {
				/*
				 * Give back the slot taken at registration;
				 * kgdb reserves and releases slots itself via
				 * hw_break_reserve_slot()/hw_break_release_slot().
				 */
				pevent[0]->destroy = NULL;
				release_bp_slot(*pevent);
			}
		}
	}
}

void kgdb_arch_exit(void)
{
	int i;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		if (breakinfo[i].pev) {
			unregister_wide_hw_breakpoint(breakinfo[i].pev);
			breakinfo[i].pev = NULL;
		}
	}

	unregister_die_notifier(&kgdb_notifier);
}