// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/ptrace.h>
#include <linux/kdebug.h>
#include <linux/bug.h>
#include <linux/kgdb.h>
#include <linux/irqflags.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/gdb_xml.h>
#include <asm/insn.h>

enum {
	NOT_KGDB_BREAK = 0,
	KGDB_SW_BREAK,
	KGDB_COMPILED_BREAK,
	KGDB_SW_SINGLE_STEP
};

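/*
 * Software single-step state: when the debugger requests a step, the
 * original instruction at the computed "next" PC is saved here and
 * replaced with a breakpoint, then restored once the step completes.
 */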
static unsigned long stepped_address;
static unsigned int stepped_opcode;

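/* Extract a 5-bit register specifier from a 32-bit instruction word */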
static int decode_register_index(unsigned long opcode, int offset)
{
	return (opcode >> offset) & 0x1F;
}

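/*
 * Compressed (RVC) instructions use a 3-bit register field that can only
 * name x8-x15, hence the +8 bias.
 */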
static int decode_register_index_short(unsigned long opcode, int offset)
{
	return ((opcode >> offset) & 0x7) + 8;
}

/* Calculate the new address for after a step */
static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
{
	unsigned long pc = regs->epc;
	unsigned long *regs_ptr = (unsigned long *)regs;
	unsigned int rs1_num, rs2_num;
	int op_code;

	if (get_kernel_nofault(op_code, (void *)pc))
		return -EINVAL;
	if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
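		/* 16-bit (compressed) instruction */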
		if (riscv_insn_is_c_jalr(op_code) ||
		    riscv_insn_is_c_jr(op_code)) {
			rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
			*next_addr = regs_ptr[rs1_num];
		} else if (riscv_insn_is_c_j(op_code) ||
			   riscv_insn_is_c_jal(op_code)) {
			*next_addr = RVC_EXTRACT_JTYPE_IMM(op_code) + pc;
		} else if (riscv_insn_is_c_beqz(op_code)) {
			rs1_num = decode_register_index_short(op_code,
							      RVC_C1_RS1_OPOFF);
			if (!rs1_num || regs_ptr[rs1_num] == 0)
				*next_addr = RVC_EXTRACT_BTYPE_IMM(op_code) + pc;
			else
				*next_addr = pc + 2;
		} else if (riscv_insn_is_c_bnez(op_code)) {
			rs1_num =
			    decode_register_index_short(op_code, RVC_C1_RS1_OPOFF);
			if (rs1_num && regs_ptr[rs1_num] != 0)
				*next_addr = RVC_EXTRACT_BTYPE_IMM(op_code) + pc;
			else
				*next_addr = pc + 2;
		} else {
			*next_addr = pc + 2;
		}
	} else {
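		/* 32-bit instruction */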
		if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
			bool result = false;
			long imm = RV_EXTRACT_BTYPE_IMM(op_code);
			unsigned long rs1_val = 0, rs2_val = 0;

			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
			rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF);
			if (rs1_num)
				rs1_val = regs_ptr[rs1_num];
			if (rs2_num)
				rs2_val = regs_ptr[rs2_num];

			if (riscv_insn_is_beq(op_code))
				result = (rs1_val == rs2_val) ? true : false;
			else if (riscv_insn_is_bne(op_code))
				result = (rs1_val != rs2_val) ? true : false;
			else if (riscv_insn_is_blt(op_code))
				result =
				    ((long)rs1_val <
				     (long)rs2_val) ? true : false;
			else if (riscv_insn_is_bge(op_code))
				result =
				    ((long)rs1_val >=
				     (long)rs2_val) ? true : false;
			else if (riscv_insn_is_bltu(op_code))
				result = (rs1_val < rs2_val) ? true : false;
			else if (riscv_insn_is_bgeu(op_code))
				result = (rs1_val >= rs2_val) ? true : false;
			if (result)
				*next_addr = imm + pc;
			else
				*next_addr = pc + 4;
		} else if (riscv_insn_is_jal(op_code)) {
			*next_addr = RV_EXTRACT_JTYPE_IMM(op_code) + pc;
		} else if (riscv_insn_is_jalr(op_code)) {
			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
			if (rs1_num)
				*next_addr = ((unsigned long *)regs)[rs1_num];
			*next_addr += RV_EXTRACT_ITYPE_IMM(op_code);
		} else if (riscv_insn_is_sret(op_code)) {
			*next_addr = pc;
		} else {
			*next_addr = pc + 4;
		}
	}
	return 0;
}

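/*
 * Single-step by planting a temporary breakpoint at the instruction that
 * will execute next, rather than relying on hardware single-step support.
 */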
static int do_single_step(struct pt_regs *regs)
{
	/* Determine where the target instruction will send us to */
	unsigned long addr = 0;
	int error = get_step_address(regs, &addr);

	if (error)
		return error;

	/* Store the op code in the stepped address */
	error = get_kernel_nofault(stepped_opcode, (void *)addr);
	if (error)
		return error;

	stepped_address = addr;

	/* Replace the op code with the break instruction */
	error = copy_to_kernel_nofault((void *)stepped_address,
				   arch_kgdb_ops.gdb_bpt_instr,
				   BREAK_INSTR_SIZE);
	/* Flush and return */
	if (!error) {
		flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
		kgdb_single_step = 1;
		atomic_set(&kgdb_cpu_doing_single_step,
			   raw_smp_processor_id());
	} else {
		stepped_address = 0;
		stepped_opcode = 0;
	}
	return error;
}

/* Undo a single step */
static void undo_single_step(struct pt_regs *regs)
{
	if (stepped_opcode != 0) {
		copy_to_kernel_nofault((void *)stepped_address,
				   (void *)&stepped_opcode, BREAK_INSTR_SIZE);
		flush_icache_range(stepped_address,
				   stepped_address + BREAK_INSTR_SIZE);
	}
	stepped_address = 0;
	stepped_opcode = 0;
	kgdb_single_step = 0;
	atomic_set(&kgdb_cpu_doing_single_step, -1);
}

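/*
 * Map GDB's register numbering onto struct pt_regs. x0 (zero) is
 * hardwired and has no slot in pt_regs, so its offset is -1 and reads
 * of it return zero.
 */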
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{DBG_REG_ZERO, GDB_SIZEOF_REG, -1},
	{DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)},
	{DBG_REG_SP, GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
	{DBG_REG_GP, GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
	{DBG_REG_TP, GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
	{DBG_REG_T0, GDB_SIZEOF_REG, offsetof(struct pt_regs, t0)},
	{DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
	{DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
	{DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
	{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, s1)},
	{DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
	{DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
	{DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
	{DBG_REG_A3, GDB_SIZEOF_REG, offsetof(struct pt_regs, a3)},
	{DBG_REG_A4, GDB_SIZEOF_REG, offsetof(struct pt_regs, a4)},
	{DBG_REG_A5, GDB_SIZEOF_REG, offsetof(struct pt_regs, a5)},
	{DBG_REG_A6, GDB_SIZEOF_REG, offsetof(struct pt_regs, a6)},
	{DBG_REG_A7, GDB_SIZEOF_REG, offsetof(struct pt_regs, a7)},
	{DBG_REG_S2, GDB_SIZEOF_REG, offsetof(struct pt_regs, s2)},
	{DBG_REG_S3, GDB_SIZEOF_REG, offsetof(struct pt_regs, s3)},
	{DBG_REG_S4, GDB_SIZEOF_REG, offsetof(struct pt_regs, s4)},
	{DBG_REG_S5, GDB_SIZEOF_REG, offsetof(struct pt_regs, s5)},
	{DBG_REG_S6, GDB_SIZEOF_REG, offsetof(struct pt_regs, s6)},
	{DBG_REG_S7, GDB_SIZEOF_REG, offsetof(struct pt_regs, s7)},
	{DBG_REG_S8, GDB_SIZEOF_REG, offsetof(struct pt_regs, s8)},
	{DBG_REG_S9, GDB_SIZEOF_REG, offsetof(struct pt_regs, s9)},
	{DBG_REG_S10, GDB_SIZEOF_REG, offsetof(struct pt_regs, s10)},
	{DBG_REG_S11, GDB_SIZEOF_REG, offsetof(struct pt_regs, s11)},
	{DBG_REG_T3, GDB_SIZEOF_REG, offsetof(struct pt_regs, t3)},
	{DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)},
	{DBG_REG_T5, GDB_SIZEOF_REG, offsetof(struct pt_regs, t5)},
	{DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct pt_regs, t6)},
	{DBG_REG_EPC, GDB_SIZEOF_REG, offsetof(struct pt_regs, epc)},
	{DBG_REG_STATUS, GDB_SIZEOF_REG, offsetof(struct pt_regs, status)},
	{DBG_REG_BADADDR, GDB_SIZEOF_REG, offsetof(struct pt_regs, badaddr)},
	{DBG_REG_CAUSE, GDB_SIZEOF_REG, offsetof(struct pt_regs, cause)},
};

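/* Copy one register out of pt_regs for GDB; returns the register's name */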
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);
	else
		memset(mem, 0, dbg_reg_def[regno].size);
	return dbg_reg_def[regno].name;
}

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return -EINVAL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);
	return 0;
}

void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
	/* Initialize to zero */
	memset((char *)gdb_regs, 0, NUMREGBYTES);

	gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
	gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
	gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
	gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
	gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
	gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
	gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
	gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
	gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
	gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
	gdb_regs[DBG_REG_S9_OFF] = task->thread.s[10];
	gdb_regs[DBG_REG_S10_OFF] = task->thread.s[11];
	gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->epc = pc;
}

void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
				char *remcom_out_buffer)
{
	if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
		     sizeof(gdb_xfer_read_target)))
		strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
	else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
			  sizeof(gdb_xfer_read_cpuxml)))
		strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
}

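/* 'c' and 's' packets may carry an optional resume address; apply it to epc */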
static inline void kgdb_arch_update_addr(struct pt_regs *regs,
					 char *remcom_in_buffer)
{
	unsigned long addr;
	char *ptr;

	ptr = &remcom_in_buffer[1];
	if (kgdb_hex2long(&ptr, &addr))
		regs->epc = addr;
}

int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	int err = 0;

	undo_single_step(regs);

	switch (remcom_in_buffer[0]) {
	case 'c':
	case 'D':
	case 'k':
		if (remcom_in_buffer[0] == 'c')
			kgdb_arch_update_addr(regs, remcom_in_buffer);
		break;
	case 's':
		kgdb_arch_update_addr(regs, remcom_in_buffer);
		err = do_single_step(regs);
		break;
	default:
		err = -1;
	}
	return err;
}

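/* Classify the trap at @addr: single-step, compiled-in break, or GDB breakpoint */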
static int kgdb_riscv_kgdbbreak(unsigned long addr)
{
	if (stepped_address == addr)
		return KGDB_SW_SINGLE_STEP;
	if (atomic_read(&kgdb_setting_breakpoint))
		if (addr == (unsigned long)&kgdb_compiled_break)
			return KGDB_COMPILED_BREAK;

	return kgdb_has_hit_break(addr);
}

static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd,
			     void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long flags;
	int type;

	if (user_mode(regs))
		return NOTIFY_DONE;

	type = kgdb_riscv_kgdbbreak(regs->epc);
	if (type == NOT_KGDB_BREAK && cmd == DIE_TRAP)
		return NOTIFY_DONE;

	local_irq_save(flags);

	if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 0 : 1,
				  args->signr, cmd, regs))
		return NOTIFY_DONE;

	if (type == KGDB_COMPILED_BREAK)
		regs->epc += 4;

	local_irq_restore(flags);

	return NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_riscv_notify,
};

int kgdb_arch_init(void)
{
	register_die_notifier(&kgdb_notifier);

	return 0;
}

void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}

/*
 * Global data
 */
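/*
 * Breakpoint instruction, stored little-endian: c.ebreak (0x9002) when the
 * C extension is enabled, otherwise the 32-bit ebreak (0x00100073).
 */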
#ifdef CONFIG_RISCV_ISA_C
const struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {0x02, 0x90},	/* c.ebreak */
};
#else
const struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {0x73, 0x00, 0x10, 0x00},	/* ebreak */
};
#endif