xref: /openbmc/linux/arch/x86/kernel/unwind_orc.c (revision e0256648)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/objtool.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>

#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)

#define orc_warn_current(args...)					\
({									\
	if (state->task == current && !state->error)			\
		orc_warn(args);						\
})

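/*
 * orc_warn() is a deferred, warn-once printk: the unwinder can run in
 * contexts (NMI, inside printk itself) where synchronously taking console
 * locks would be unsafe.  orc_warn_current() further restricts warnings to
 * unwinds of the current task that haven't already been flagged unreliable,
 * since unwinds of other tasks are racy and expected to fail occasionally.
 */
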
extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static bool orc_init __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;

static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}

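/*
 * Each .orc_unwind_ip entry is a 32-bit value relative to its own address;
 * orc_ip() decodes it by adding the stored offset to the slot's location.
 * Worked example (made-up addresses): a slot at 0xffffffff82000000 storing
 * -0x1000000 describes the instruction at 0xffffffff81000000.  The
 * self-relative encoding keeps the table at int size and free of load-time
 * relocations.
 */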
static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid = first, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address.  Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps.  They should be
	 * ignored when they conflict with a real entry.
	 */
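	/*
	 * Illustration (hypothetical start addresses): for a table decoding
	 * to { 0x10, 0x20, 0x20, 0x30 } and ip == 0x25, the loop converges
	 * on the second 0x20 entry -- the rightmost one whose start address
	 * is <= ip.  Terminators sort before real entries at the same
	 * address (see orc_sort_cmp()), so "rightmost" naturally prefers
	 * the real entry.
	 */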
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have ORC entries of their own.
 * However, they are copies of the static ftrace entry code defined
 * in ftrace_*.S, which does have ORC entries.
 *
 * If the unwinder comes across an ftrace trampoline, find the static
 * ftrace function it was copied from and use that function's ORC
 * entry, as the placement of the return address on the stack will be
 * identical.
 */
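/*
 * Sketch with made-up addresses: if ops->trampoline == 0xffffa00000001000,
 * ip == 0xffffa00000001010 (offset 0x10 into the trampoline), and the
 * trampoline was copied from ftrace_caller at 0xffffffff81002000, the lookup
 * below is redirected to 0xffffffff81002010, whose ORC entry describes the
 * same stack layout.
 */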
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long tramp_addr, offset;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	/* Set tramp_addr to the start of the code copied by the trampoline */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		tramp_addr = (unsigned long)ftrace_regs_caller;
	else
		tramp_addr = (unsigned long)ftrace_caller;

	/* Now adjust tramp_addr to ip's corresponding location in the static caller */
	offset = ip - ops->trampoline;
	tramp_addr += offset;

	/* Prevent unlikely recursion */
	if (ip == tramp_addr)
		return NULL;

	return orc_find(tramp_addr);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
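/*
 * Decoded, this entry says: the previous stack pointer is SP + sizeof(long),
 * and the return address sits at (SP) -- i.e. exactly the state right after
 * a CALL instruction, before the callee has pushed anything.
 */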
static struct orc_entry null_orc_entry = {
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = ORC_TYPE_CALL
};

#ifdef CONFIG_CALL_THUNKS
static struct orc_entry *orc_callthunk_find(unsigned long ip)
{
	if (!is_callthunk((void *)ip))
		return NULL;

	return &null_orc_entry;
}
#else
static struct orc_entry *orc_callthunk_find(unsigned long ip)
{
	return NULL;
}
#endif

/* Fake frame pointer entry -- used as a fallback for generated code */
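/*
 * Decoded under the standard "push %rbp; mov %rsp, %rbp" prologue assumption:
 *
 *	BP +  0: caller's saved RBP    (bp_offset -16 from the previous SP)
 *	BP +  8: return address
 *	BP + 16: previous frame's SP   (sp_offset 16 from the current BP)
 */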
static struct orc_entry orc_fp_entry = {
	.type		= ORC_TYPE_CALL,
	.sp_reg		= ORC_REG_BP,
	.sp_offset	= 16,
	.bp_reg		= ORC_REG_PREV_SP,
	.bp_offset	= -16,
};

static struct orc_entry *orc_find(unsigned long ip)
{
	struct orc_entry *orc;

	if (ip == 0)
		return &null_orc_entry;

	/* For non-init vmlinux addresses, use the fast lookup table: */
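	/*
	 * orc_lookup[] is a two-level index built by unwind_init(): text is
	 * divided into LOOKUP_BLOCK_SIZE-byte blocks and each slot records
	 * where in the ORC tables that block's entries begin.  Worked
	 * example (made-up numbers): with a 256-byte block size, an ip
	 * 0x500 bytes past LOOKUP_START_IP selects block idx 5, confining
	 * the binary search to roughly [orc_lookup[5], orc_lookup[6]].
	 */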
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely(idx >= lookup_num_blocks-1)) {
			orc_warn("bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (is_kernel_inittext(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	orc = orc_ftrace_find(ip);
	if (orc)
		return orc;

	return orc_callthunk_find(ip);
}

#ifdef CONFIG_MODULES

static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;

static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;

	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;
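	/*
	 * Because the entries are self-relative (orc_ip(p) == p + *p),
	 * naively exchanging the stored values would change the addresses
	 * they decode to.  Re-biasing by delta keeps them stable: e.g.
	 * (made-up numbers) if slots a and b are 8 bytes apart and *b
	 * decodes to address X, then after the swap a + (*b + delta) still
	 * decodes to X.
	 */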

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}

static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be first
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}

void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("Bad or missing .orc_unwind table.  Disabling unwinder.\n");
		return;
	}

	/*
	 * Note: the orc_unwind and orc_unwind_ip tables were already sorted
	 * at build time by the 'sorttable' tool, so they are ready for binary
	 * search without any further sorting here.
	 */

	/* Initialize the fast lookup table: */
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks-1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("Corrupt .orc_unwind table.  Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("Corrupt .orc_unwind table.  Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}

unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}

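/*
 * Check whether [addr, addr + len) lies entirely within a stack the task is
 * known to use.  If the range isn't on the currently tracked stack, try to
 * re-resolve stack_info to whichever valid stack (task, IRQ, exception) does
 * contain it -- this is how the unwinder follows transitions between stacks.
 */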
static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (on_stack(info, addr, len))
		return true;

	return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
		on_stack(info, addr, len);
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}

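/*
 * Here 'addr' points at a partial (IRET) frame: only the words pushed by the
 * hardware (ip, cs, flags, sp, ss) are on the stack.  Biasing the pt_regs
 * pointer back by IRET_FRAME_OFFSET makes those words line up with the tail
 * of struct pt_regs, so regs->ip and regs->sp read the right slots.
 */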
static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}

/*
 * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
 * value from state->regs.
 *
 * Otherwise, if state->regs just points to IRET regs, and the previous frame
 * had full regs, it's safe to get the value from the previous regs.  This can
 * happen when early/late IRQ entry code gets interrupted by an NMI.
 */
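/*
 * reg_off is a byte offset into struct pt_regs, e.g. offsetof(struct pt_regs,
 * r10).  The register slots are 8 bytes each on x86-64, so dividing by 8
 * turns the offset into an index into the frame viewed as an array of longs.
 */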
static bool get_reg(struct unwind_state *state, unsigned int reg_off,
		    unsigned long *val)
{
	unsigned int reg = reg_off/8;

	if (!state->regs)
		return false;

	if (state->full_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
		return true;
	}

	if (state->prev_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
		return true;
	}

	return false;
}

bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * For a call frame (as opposed to a signal frame), state->ip points to
	 * the instruction after the call.  That instruction's stack layout
	 * could be different from the call instruction's layout, for example
	 * if the call was to a noreturn function.  So get the ORC data for the
	 * call instruction itself.
	 */
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc) {
		/*
		 * As a fallback, try to assume this code uses a frame pointer.
		 * This is useful for generated code, like BPF, which ORC
		 * doesn't know about.  This is just a guess, so the rest of
		 * the unwind is no longer considered reliable.
		 */
		orc = &orc_fp_entry;
		state->error = true;
	} else {
		if (orc->type == ORC_TYPE_UNDEFINED)
			goto err;

		if (orc->type == ORC_TYPE_END_OF_STACK)
			goto the_end;
	}

	state->signal = orc->signal;

	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
			orc_warn_current("missing R10 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_R13:
		if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
			orc_warn_current("missing R13 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DI:
		if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
			orc_warn_current("missing RDI value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DX:
		if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
			orc_warn_current("missing RDX value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	default:
		orc_warn("unknown SP base reg %d at %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}

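	/*
	 * Note the asymmetry: for ORC_REG_SP_INDIRECT the offset is applied
	 * after the dereference below, while for ORC_REG_BP_INDIRECT it was
	 * already added to the address above, before the dereference.
	 */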
	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto err;

		if (orc->sp_reg == ORC_REG_SP_INDIRECT)
			sp += orc->sp_offset;
	}

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case ORC_TYPE_CALL:
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

		state->ip = unwind_recover_ret_addr(state, state->ip,
						    (unsigned long *)ip_p);
		state->sp = sp;
		state->regs = NULL;
		state->prev_regs = NULL;
		break;

	case ORC_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}
		/*
		 * There is a small chance of interrupting at the very entry of
		 * arch_rethook_trampoline(), where no ORC info exists: right
		 * after the RET to arch_rethook_trampoline(), i.e. right after
		 * a return through a modified return address.  At that point
		 * the @addr_p argument of unwind_recover_rethook() (which must
		 * point to the stack slot holding the modified return address)
		 * has to be "SP - (one stack entry)", because SP was already
		 * incremented by the RET.
		 */
		state->ip = unwind_recover_rethook(state, state->ip,
				(unsigned long *)(state->sp - sizeof(long)));
		state->regs = (struct pt_regs *)sp;
		state->prev_regs = NULL;
		state->full_regs = true;
		break;

	case ORC_TYPE_REGS_PARTIAL:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access iret registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}
		/* See ORC_TYPE_REGS case comment. */
		state->ip = unwind_recover_rethook(state, state->ip,
				(unsigned long *)(state->sp - sizeof(long)));

		if (state->full_regs)
			state->prev_regs = state->regs;
		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d at %pB\n",
			 orc->type, (void *)orig_ip);
		goto err;
	}

	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
			state->bp = tmp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}

	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn_current("stack going in the wrong direction? at %pB\n",
				 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	if (!orc_init)
		goto err;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU.  This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto err;

	if (regs) {
		if (user_mode(regs))
			goto the_end;

		state->ip = regs->ip;
		state->sp = regs->sp;
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp + sizeof(*frame);
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
		state->signal = (void *)state->ip == ret_from_fork;
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack.  It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		state->error = true;
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at.  Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
			state->sp <= (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

err:
	state->error = true;
the_end:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(__unwind_start);
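
/*
 * A typical consumer (e.g. arch_stack_walk() in arch/x86/kernel/stacktrace.c)
 * drives this API roughly as follows -- a minimal sketch, not a verbatim
 * copy of any caller; consume_entry() stands in for whatever the caller
 * does with each recovered return address:
 *
 *	struct unwind_state state;
 *	unsigned long addr;
 *
 *	for (unwind_start(&state, task, regs, first_frame);
 *	     !unwind_done(&state); unwind_next_frame(&state)) {
 *		addr = unwind_get_return_address(&state);
 *		if (!addr || !consume_entry(cookie, addr))
 *			break;
 *	}
 */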