/* xref: /openbmc/linux/arch/x86/kernel/unwind_orc.c (revision c127f98ba9aba1818a6ca3a1da5a24653a10d966) */
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>
#include <asm/sections.h>

#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)

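/*
 * The ORC data lives in two parallel arrays: .orc_unwind_ip holds
 * self-relative 32-bit values which decode to the text address where each
 * ORC state takes effect, and .orc_unwind holds the corresponding orc_entry
 * structures.  Both arrays are sorted by IP at boot (and at module load) so
 * they can be binary searched.
 */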
extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static DEFINE_MUTEX(sort_mutex);
int *cur_orc_ip_table = __start_orc_unwind_ip;
struct orc_entry *cur_orc_table = __start_orc_unwind;

unsigned int lookup_num_blocks;
bool orc_init;

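/*
 * Decode a self-relative .orc_unwind_ip entry: the stored value is a signed
 * offset from the entry's own location.  (Purely as an illustration, an
 * entry located at 0xffffffff82000100 holding the value -0x1000000 would
 * decode to the text address 0xffffffff81000100.)
 */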
static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}

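/*
 * Binary-search a range of the IP table for the rightmost entry whose
 * decoded address is <= @ip, and return the orc_entry at the same index in
 * the parallel .orc_unwind table.
 */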
static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid = first, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address.  Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps.  They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

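/*
 * Look up the ORC entry for a text address.  Core kernel text goes through
 * the orc_lookup[] fast path below, .init text falls back to a binary search
 * of the whole table, and module addresses use the per-module tables.
 */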
static struct orc_entry *orc_find(unsigned long ip)
{
	if (!orc_init)
		return NULL;

	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely((idx >= lookup_num_blocks-1))) {
			orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

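		/*
		 * orc_lookup[idx] and orc_lookup[idx + 1] bound the slice of
		 * the ORC tables which can contain entries for this block of
		 * text, so the binary search below only has to cover that
		 * slice.  (Each block covers LOOKUP_BLOCK_SIZE bytes of text,
		 * i.e. 256 bytes assuming the usual LOOKUP_BLOCK_ORDER of 8
		 * from orc_lookup.h.)
		 */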
		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (ip >= (unsigned long)_sinittext && ip < (unsigned long)_einittext)
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	return orc_module_find(ip);
}

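/*
 * sort() swap callback.  Because the .orc_unwind_ip values are
 * self-relative, swapping two entries moves where each value is stored, so
 * each value has to be rebased by the distance between the two slots (delta)
 * to keep decoding to the same text address.
 */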
static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;

	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}

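/*
 * sort() comparison callback: order entries by decoded IP, with the
 * tie-break below keeping "terminator" entries to the left of real entries
 * starting at the same address, so __orc_find()'s rightmost-match rule
 * prefers the real one.
 */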
static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be on the left
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->sp_reg == ORC_REG_UNDEFINED ? -1 : 1;
}

#ifdef CONFIG_MODULES
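/*
 * Sort a module's ORC tables at load time, mirroring what unwind_init()
 * does for the core kernel tables, and hook them up for orc_module_find().
 */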
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

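/*
 * Boot-time setup: validate and sort the core kernel ORC tables, then build
 * the orc_lookup[] block table which lets orc_find() limit its binary search
 * to a small slice of the (large) .orc_unwind table.
 */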
void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("WARNING: Bad or missing .orc_unwind table.  Disabling unwinder.\n");
		return;
	}

	/* Sort the .orc_unwind and .orc_unwind_ip tables: */
	sort(__start_orc_unwind_ip, num_entries, sizeof(int), orc_sort_cmp,
	     orc_sort_swap);

	/* Initialize the fast lookup table: */
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks-1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("WARNING: Corrupt .orc_unwind table.  Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("WARNING: Corrupt .orc_unwind table.  Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}

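/*
 * Typical use of this unwinder API is roughly the following (illustrative
 * sketch only -- see arch/x86/include/asm/unwind.h and the callers in
 * arch/x86/kernel/ for the real definitions):
 *
 *	struct unwind_state state;
 *	unsigned long addr;
 *
 *	for (unwind_start(&state, task, regs, first_frame);
 *	     !unwind_done(&state);
 *	     unwind_next_frame(&state)) {
 *		addr = unwind_get_return_address(&state);
 *		if (!addr)
 *			break;
 *		...
 *	}
 */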
unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}

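/*
 * Check that the unwinder may read @len bytes at @_addr: the range has to be
 * on the stack currently being walked, or on another valid kernel stack (in
 * which case state->stack_info is switched over to it).
 */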
static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (!on_stack(info, addr, len) &&
	    (get_stack_info(addr, state->task, info, &state->stack_mask)))
		return false;

	return true;
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = regs->ip;
	*sp = regs->sp;
	return true;
}

static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = regs->ip;
	*sp = regs->sp;
	return true;
}

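/*
 * Advance the unwind state by one frame: find the ORC entry for the current
 * IP, use its sp_reg/sp_offset to locate the previous stack pointer, then
 * recover the previous IP (and the saved pt_regs for exception frames)
 * according to the entry type, and finally the previous BP.
 */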
bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* Have we reached the end? */
	if (state->regs && user_mode(state->regs))
		goto done;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * Decrement call return addresses by one so they work for sibling
	 * calls and calls to noreturn functions.
	 */
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc || orc->sp_reg == ORC_REG_UNDEFINED)
		goto done;
	orig_ip = state->ip;

	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg R10 at ip %pB\n",
				 (void *)state->ip);
			goto done;
		}
		sp = state->regs->r10;
		break;

	case ORC_REG_R13:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg R13 at ip %pB\n",
				 (void *)state->ip);
			goto done;
		}
		sp = state->regs->r13;
		break;

	case ORC_REG_DI:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg DI at ip %pB\n",
				 (void *)state->ip);
			goto done;
		}
		sp = state->regs->di;
		break;

	case ORC_REG_DX:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg DX at ip %pB\n",
				 (void *)state->ip);
			goto done;
		}
		sp = state->regs->dx;
		break;

	default:
		orc_warn("unknown SP base reg %d for ip %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto done;
	}

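	/*
	 * For the *_INDIRECT cases, 'sp' currently holds the location of the
	 * previous stack pointer rather than its value, so dereference it:
	 */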
	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto done;
	}

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case ORC_TYPE_CALL:
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto done;

		state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						  state->ip, (void *)ip_p);

		state->sp = sp;
		state->regs = NULL;
		state->signal = false;
		break;

	case ORC_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn("can't dereference registers at %p for ip %pB\n",
				 (void *)sp, (void *)orig_ip);
			goto done;
		}

		state->regs = (struct pt_regs *)sp;
		state->full_regs = true;
		state->signal = true;
		break;

	case ORC_TYPE_REGS_IRET:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn("can't dereference iret registers at %p for ip %pB\n",
				 (void *)sp, (void *)orig_ip);
			goto done;
		}

		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		state->signal = true;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
			 orc->type, (void *)orig_ip);
		break;
	}

	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (state->regs && state->full_regs)
			state->bp = state->regs->bp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto done;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto done;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto done;
	}

	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn("stack going in the wrong direction? ip=%pB\n",
			 (void *)orig_ip);
		goto done;
	}

	preempt_enable();
	return true;

done:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

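/*
 * Initialize the unwind state from an explicit pt_regs, from the current
 * task's live registers (via the asm below), or from a sleeping task's saved
 * inactive_task_frame, then skip ahead to the frame the caller asked for.
 */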
void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU.  This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto done;

	if (regs) {
		if (user_mode(regs))
			goto done;

		state->ip = regs->ip;
		state->sp = kernel_stack_pointer(regs);
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp;
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack.  It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at.  Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
			state->sp <= (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

done:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return;
}
EXPORT_SYMBOL_GPL(__unwind_start);