xref: /openbmc/linux/tools/objtool/check.c (revision 498a1cf902c31c3af398082d65cf150b33b367e6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4  */
5 
6 #include <string.h>
7 #include <stdlib.h>
8 #include <inttypes.h>
9 #include <sys/mman.h>
10 
11 #include <arch/elf.h>
12 #include <objtool/builtin.h>
13 #include <objtool/cfi.h>
14 #include <objtool/arch.h>
15 #include <objtool/check.h>
16 #include <objtool/special.h>
17 #include <objtool/warn.h>
18 #include <objtool/endianness.h>
19 
20 #include <linux/objtool.h>
21 #include <linux/hashtable.h>
22 #include <linux/kernel.h>
23 #include <linux/static_call_types.h>
24 
/*
 * One alternative code sequence queued for validation on an instruction's
 * 'alts' list (see INIT_LIST_HEAD(&insn->alts) in decode_instructions()).
 */
struct alternative {
	struct list_head list;		/* node in the instruction's alts list */
	struct instruction *insn;	/* entry insn of the alternative sequence */
	bool skip_orig;			/* presumably: skip validating the original path — confirm at use site */
};
30 
/* CFI-state allocation/interning counters (see cfi_alloc()/cfi_hash_find_or_add()). */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

/* Baseline CFI states; initialization/use is outside this chunk. */
static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
36 
/*
 * Look up the decoded instruction at (sec, offset) in the file-wide
 * instruction hash populated by decode_instructions().  Returns NULL if no
 * instruction starts at that exact offset.
 */
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	/* Hash buckets can collide; verify the exact (sec, offset) pair. */
	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}
49 
50 static struct instruction *next_insn_same_sec(struct objtool_file *file,
51 					      struct instruction *insn)
52 {
53 	struct instruction *next = list_next_entry(insn, list);
54 
55 	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
56 		return NULL;
57 
58 	return next;
59 }
60 
/*
 * Return the next instruction belonging to the same function as @insn.
 * When the parent function's instructions run out, continue into its
 * subfunction (func->cfunc), if any.  Returns NULL at the end.
 */
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn_func(insn);

	if (!func)
		return NULL;

	/* Next list entry still inside the same function? */
	if (&next->list != &file->insn_list && insn_func(next) == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}
80 
81 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
82 					       struct instruction *insn)
83 {
84 	struct instruction *prev = list_prev_entry(insn, list);
85 
86 	if (&prev->list != &file->insn_list && insn_func(prev) == insn_func(insn))
87 		return prev;
88 
89 	return NULL;
90 }
91 
/* Iterate all instructions of @func, following into its subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate the instructions inside symbol @sym's [offset, offset+len) range. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
		insn->sec == sym->sec &&				\
		insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

/* Walk backwards from @insn (exclusive) while still inside @sym. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
		insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

/* Iterate from @insn (inclusive) to the end of its section. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Iterate from the instruction after @insn to the end of its section. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
116 
117 static bool is_jump_table_jump(struct instruction *insn)
118 {
119 	struct alt_group *alt_group = insn->alt_group;
120 
121 	if (insn->jump_table)
122 		return true;
123 
124 	/* Retpoline alternative for a jump table? */
125 	return alt_group && alt_group->orig_group &&
126 	       alt_group->orig_group->first_insn->jump_table;
127 }
128 
129 static bool is_sibling_call(struct instruction *insn)
130 {
131 	/*
132 	 * Assume only STT_FUNC calls have jump-tables.
133 	 */
134 	if (insn_func(insn)) {
135 		/* An indirect jump is either a sibling call or a jump to a table. */
136 		if (insn->type == INSN_JUMP_DYNAMIC)
137 			return !is_jump_table_jump(insn);
138 	}
139 
140 	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
141 	return (is_static_jump(insn) && insn->call_dest);
142 }
143 
/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 *
 * @recursion bounds how deeply chains of local sibling calls are followed
 * (see the recursion == 5 check below).
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data. Keep 'em sorted.
	 */
	static const char * const global_noreturns[] = {
		"__invalid_creds",
		"__module_put_and_kthread_exit",
		"__reiserfs_panic",
		"__stack_chk_fail",
		"__ubsan_handle_builtin_unreachable",
		"cpu_bringup_and_idle",
		"cpu_startup_entry",
		"do_exit",
		"do_group_exit",
		"do_task_dead",
		"ex_handler_msr_mce",
		"fortify_panic",
		"kthread_complete_and_exit",
		"kthread_exit",
		"kunit_try_catch_throw",
		"lbug_with_loc",
		"machine_real_restart",
		"make_task_dead",
		"panic",
		"rewind_stack_and_make_dead",
		"sev_es_terminate",
		"snp_abort",
		"stop_this_cpu",
		"usercopy_abort",
		"xen_cpu_bringup_again",
		"xen_start_kernel",
	};

	if (!func)
		return false;

	/* A weak symbol may be overridden by a returning definition. */
	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	/* Any explicit return instruction means the function can return. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}
250 
/* Entry point for the noreturn check: start the recursion depth at zero. */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
255 
256 static void init_cfi_state(struct cfi_state *cfi)
257 {
258 	int i;
259 
260 	for (i = 0; i < CFI_NUM_REGS; i++) {
261 		cfi->regs[i].base = CFI_UNDEFINED;
262 		cfi->vals[i].base = CFI_UNDEFINED;
263 	}
264 	cfi->cfa.base = CFI_UNDEFINED;
265 	cfi->drap_reg = CFI_UNDEFINED;
266 	cfi->drap_offset = -1;
267 }
268 
269 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
270 			    struct section *sec)
271 {
272 	memset(state, 0, sizeof(*state));
273 	init_cfi_state(&state->cfi);
274 
275 	/*
276 	 * We need the full vmlinux for noinstr validation, otherwise we can
277 	 * not correctly determine insn->call_dest->sec (external symbols do
278 	 * not have a section).
279 	 */
280 	if (opts.link && opts.noinstr && sec)
281 		state->noinstr = sec->noinstr;
282 }
283 
284 static struct cfi_state *cfi_alloc(void)
285 {
286 	struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
287 	if (!cfi) {
288 		WARN("calloc failed");
289 		exit(1);
290 	}
291 	nr_cfi++;
292 	return cfi;
293 }
294 
/* Interning hash for cfi_state objects; cfi_bits is the table's log2 size. */
static int cfi_bits;
static struct hlist_head *cfi_hash;
297 
/*
 * Compare two cfi_state objects, skipping the leading 'hash' list node so
 * only the CFI payload is compared.  memcmp semantics: zero means equal.
 * Relies on 'hash' (a struct hlist_node) being the first member.
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}
304 
/* Hash the cfi_state payload (everything after the leading 'hash' member). */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
310 
311 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
312 {
313 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
314 	struct cfi_state *obj;
315 
316 	hlist_for_each_entry(obj, head, hash) {
317 		if (!cficmp(cfi, obj)) {
318 			nr_cfi_cache++;
319 			return obj;
320 		}
321 	}
322 
323 	obj = cfi_alloc();
324 	*obj = *cfi;
325 	hlist_add_head(&obj->hash, head);
326 
327 	return obj;
328 }
329 
330 static void cfi_hash_add(struct cfi_state *cfi)
331 {
332 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
333 
334 	hlist_add_head(&cfi->hash, head);
335 }
336 
337 static void *cfi_hash_alloc(unsigned long size)
338 {
339 	cfi_bits = max(10, ilog2(size));
340 	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
341 			PROT_READ|PROT_WRITE,
342 			MAP_PRIVATE|MAP_ANON, -1, 0);
343 	if (cfi_hash == (void *)-1L) {
344 		WARN("mmap fail cfi_hash");
345 		cfi_hash = NULL;
346 	}  else if (opts.stats) {
347 		printf("cfi_bits: %d\n", cfi_bits);
348 	}
349 
350 	return cfi_hash;
351 }
352 
/* Decoded-instruction counters; nr_insns is printed with --stats. */
static unsigned long nr_insns;
static unsigned long nr_insns_visited;
355 
/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {

		/* Only executable sections contain instructions. */
		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		/*
		 * Alternative-replacement and .discard sections are not
		 * treated as regular text.
		 */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		/* Sections subject to noinstr validation (see init_insn_state()). */
		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text.__x86.", 12))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		/* Decode every instruction in the section, front to back. */
		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			insn = malloc(sizeof(*insn));
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
			INIT_LIST_HEAD(&insn->stack_ops);
			INIT_LIST_HEAD(&insn->call_node);

			insn->sec = sec;
			insn->offset = offset;

			ret = arch_decode_instruction(file, sec, offset,
						      sec->sh.sh_size - offset,
						      &insn->len, &insn->type,
						      &insn->immediate,
						      &insn->stack_ops);
			if (ret)
				goto err;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			list_add_tail(&insn->list, &file->insn_list);
			nr_insns++;
		}

		/* Associate each instruction with its containing symbol. */
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_NOTYPE && func->type != STT_FUNC)
				continue;

			if (func->offset == sec->sh.sh_size) {
				/* Heuristic: likely an "end" symbol */
				if (func->type == STT_NOTYPE)
					continue;
				WARN("%s(): STT_FUNC at end of section",
				     func->name);
				return -1;
			}

			/* Skip return thunks and non-canonical aliases. */
			if (func->return_thunk || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				/*
				 * Queue function-entry ENDBRs for possible
				 * sealing (see create_ibt_endbr_seal_sections());
				 * interior ENDBRs are only counted.
				 */
				if (func->type == STT_FUNC &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;

err:
	free(insn);
	return ret;
}
475 
/*
 * Read the pv_ops[] .data table to find the static initialized values.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *rel;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;	/* table not present in this object: not an error */

	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!rel)
			break;

		/* Resolve section-relative relocations to the target symbol. */
		func = rel->sym;
		if (func->type == STT_SECTION)
			func = find_symbol_by_offset(rel->sym->sec, rel->addend);

		/* Slot index within the table; entries are pointer-sized. */
		idx = (rel->offset - sym->offset) / sizeof(unsigned long);

		objtool_pv_add(file, idx, func);

		/* Continue the scan just past this relocation. */
		off = rel->offset + 1;
		if (off > end)
			break;
	}

	return 0;
}
512 
513 /*
514  * Allocate and initialize file->pv_ops[].
515  */
516 static int init_pv_ops(struct objtool_file *file)
517 {
518 	static const char *pv_ops_tables[] = {
519 		"pv_ops",
520 		"xen_cpu_ops",
521 		"xen_irq_ops",
522 		"xen_mmu_ops",
523 		NULL,
524 	};
525 	const char *pv_ops;
526 	struct symbol *sym;
527 	int idx, nr;
528 
529 	if (!opts.noinstr)
530 		return 0;
531 
532 	file->pv_ops = NULL;
533 
534 	sym = find_symbol_by_name(file->elf, "pv_ops");
535 	if (!sym)
536 		return 0;
537 
538 	nr = sym->len / sizeof(unsigned long);
539 	file->pv_ops = calloc(sizeof(struct pv_state), nr);
540 	if (!file->pv_ops)
541 		return -1;
542 
543 	for (idx = 0; idx < nr; idx++)
544 		INIT_LIST_HEAD(&file->pv_ops[idx].targets);
545 
546 	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
547 		add_pv_ops(file, pv_ops);
548 
549 	return 0;
550 }
551 
552 static struct instruction *find_last_insn(struct objtool_file *file,
553 					  struct section *sec)
554 {
555 	struct instruction *insn = NULL;
556 	unsigned int offset;
557 	unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
558 
559 	for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
560 		insn = find_insn(file, sec, offset);
561 
562 	return insn;
563 }
564 
565 /*
566  * Mark "ud2" instructions and manually annotated dead ends.
567  */
568 static int add_dead_ends(struct objtool_file *file)
569 {
570 	struct section *sec;
571 	struct reloc *reloc;
572 	struct instruction *insn;
573 
574 	/*
575 	 * Check for manually annotated dead ends.
576 	 */
577 	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
578 	if (!sec)
579 		goto reachable;
580 
581 	list_for_each_entry(reloc, &sec->reloc_list, list) {
582 		if (reloc->sym->type != STT_SECTION) {
583 			WARN("unexpected relocation symbol type in %s", sec->name);
584 			return -1;
585 		}
586 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
587 		if (insn)
588 			insn = list_prev_entry(insn, list);
589 		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
590 			insn = find_last_insn(file, reloc->sym->sec);
591 			if (!insn) {
592 				WARN("can't find unreachable insn at %s+0x%" PRIx64,
593 				     reloc->sym->sec->name, reloc->addend);
594 				return -1;
595 			}
596 		} else {
597 			WARN("can't find unreachable insn at %s+0x%" PRIx64,
598 			     reloc->sym->sec->name, reloc->addend);
599 			return -1;
600 		}
601 
602 		insn->dead_end = true;
603 	}
604 
605 reachable:
606 	/*
607 	 * These manually annotated reachable checks are needed for GCC 4.4,
608 	 * where the Linux unreachable() macro isn't supported.  In that case
609 	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
610 	 * not a dead end.
611 	 */
612 	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
613 	if (!sec)
614 		return 0;
615 
616 	list_for_each_entry(reloc, &sec->reloc_list, list) {
617 		if (reloc->sym->type != STT_SECTION) {
618 			WARN("unexpected relocation symbol type in %s", sec->name);
619 			return -1;
620 		}
621 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
622 		if (insn)
623 			insn = list_prev_entry(insn, list);
624 		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
625 			insn = find_last_insn(file, reloc->sym->sec);
626 			if (!insn) {
627 				WARN("can't find reachable insn at %s+0x%" PRIx64,
628 				     reloc->sym->sec->name, reloc->addend);
629 				return -1;
630 			}
631 		} else {
632 			WARN("can't find reachable insn at %s+0x%" PRIx64,
633 			     reloc->sym->sec->name, reloc->addend);
634 			return -1;
635 		}
636 
637 		insn->dead_end = false;
638 	}
639 
640 	return 0;
641 }
642 
643 static int create_static_call_sections(struct objtool_file *file)
644 {
645 	struct section *sec;
646 	struct static_call_site *site;
647 	struct instruction *insn;
648 	struct symbol *key_sym;
649 	char *key_name, *tmp;
650 	int idx;
651 
652 	sec = find_section_by_name(file->elf, ".static_call_sites");
653 	if (sec) {
654 		INIT_LIST_HEAD(&file->static_call_list);
655 		WARN("file already has .static_call_sites section, skipping");
656 		return 0;
657 	}
658 
659 	if (list_empty(&file->static_call_list))
660 		return 0;
661 
662 	idx = 0;
663 	list_for_each_entry(insn, &file->static_call_list, call_node)
664 		idx++;
665 
666 	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
667 				 sizeof(struct static_call_site), idx);
668 	if (!sec)
669 		return -1;
670 
671 	idx = 0;
672 	list_for_each_entry(insn, &file->static_call_list, call_node) {
673 
674 		site = (struct static_call_site *)sec->data->d_buf + idx;
675 		memset(site, 0, sizeof(struct static_call_site));
676 
677 		/* populate reloc for 'addr' */
678 		if (elf_add_reloc_to_insn(file->elf, sec,
679 					  idx * sizeof(struct static_call_site),
680 					  R_X86_64_PC32,
681 					  insn->sec, insn->offset))
682 			return -1;
683 
684 		/* find key symbol */
685 		key_name = strdup(insn->call_dest->name);
686 		if (!key_name) {
687 			perror("strdup");
688 			return -1;
689 		}
690 		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
691 			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
692 			WARN("static_call: trampoline name malformed: %s", key_name);
693 			return -1;
694 		}
695 		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
696 		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
697 
698 		key_sym = find_symbol_by_name(file->elf, tmp);
699 		if (!key_sym) {
700 			if (!opts.module) {
701 				WARN("static_call: can't find static_call_key symbol: %s", tmp);
702 				return -1;
703 			}
704 
705 			/*
706 			 * For modules(), the key might not be exported, which
707 			 * means the module can make static calls but isn't
708 			 * allowed to change them.
709 			 *
710 			 * In that case we temporarily set the key to be the
711 			 * trampoline address.  This is fixed up in
712 			 * static_call_add_module().
713 			 */
714 			key_sym = insn->call_dest;
715 		}
716 		free(key_name);
717 
718 		/* populate reloc for 'key' */
719 		if (elf_add_reloc(file->elf, sec,
720 				  idx * sizeof(struct static_call_site) + 4,
721 				  R_X86_64_PC32, key_sym,
722 				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
723 			return -1;
724 
725 		idx++;
726 	}
727 
728 	return 0;
729 }
730 
731 static int create_retpoline_sites_sections(struct objtool_file *file)
732 {
733 	struct instruction *insn;
734 	struct section *sec;
735 	int idx;
736 
737 	sec = find_section_by_name(file->elf, ".retpoline_sites");
738 	if (sec) {
739 		WARN("file already has .retpoline_sites, skipping");
740 		return 0;
741 	}
742 
743 	idx = 0;
744 	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
745 		idx++;
746 
747 	if (!idx)
748 		return 0;
749 
750 	sec = elf_create_section(file->elf, ".retpoline_sites", 0,
751 				 sizeof(int), idx);
752 	if (!sec) {
753 		WARN("elf_create_section: .retpoline_sites");
754 		return -1;
755 	}
756 
757 	idx = 0;
758 	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
759 
760 		int *site = (int *)sec->data->d_buf + idx;
761 		*site = 0;
762 
763 		if (elf_add_reloc_to_insn(file->elf, sec,
764 					  idx * sizeof(int),
765 					  R_X86_64_PC32,
766 					  insn->sec, insn->offset)) {
767 			WARN("elf_add_reloc_to_insn: .retpoline_sites");
768 			return -1;
769 		}
770 
771 		idx++;
772 	}
773 
774 	return 0;
775 }
776 
/*
 * Emit the .return_sites section: one 32-bit entry per return-thunk call,
 * each relocated to point at its instruction.
 */
static int create_return_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".return_sites");
	if (sec) {
		WARN("file already has .return_sites, skipping");
		return 0;
	}

	/* First pass: count entries to size the section. */
	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".return_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .return_sites");
		return -1;
	}

	/* Second pass: zero each slot and attach its relocation. */
	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .return_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}
822 
/*
 * Emit the .ibt_endbr_seal section: one 32-bit entry per superfluous
 * ENDBR (queued on file->endbr_list by decode_instructions() and later
 * passes), each relocated to point at the ENDBR instruction.
 */
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	/* First pass: count entries to size the section. */
	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions:  %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR:       %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .ibt_endbr_seal");
		return -1;
	}

	/* Second pass: zero each slot and attach its relocation. */
	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
			return -1;
		}

		idx++;
	}

	return 0;
}
874 
/*
 * Emit the .cfi_sites section: one 32-bit entry per "__cfi_*" preamble
 * symbol found in text sections, each relocated to the symbol's address.
 */
static int create_cfi_sections(struct objtool_file *file)
{
	struct section *sec, *s;
	struct symbol *sym;
	unsigned int *loc;
	int idx;

	sec = find_section_by_name(file->elf, ".cfi_sites");
	if (sec) {
		/*
		 * NOTE(review): resetting call_list here looks copy-pasted
		 * from create_direct_call_sections() — confirm it is
		 * intentional for the .cfi_sites case.
		 */
		INIT_LIST_HEAD(&file->call_list);
		WARN("file already has .cfi_sites section, skipping");
		return 0;
	}

	/* First pass: count __cfi_* symbols to size the section. */
	idx = 0;
	for_each_sec(file, s) {
		if (!s->text)
			continue;

		list_for_each_entry(sym, &s->symbol_list, list) {
			if (sym->type != STT_FUNC)
				continue;

			if (strncmp(sym->name, "__cfi_", 6))
				continue;

			idx++;
		}
	}

	sec = elf_create_section(file->elf, ".cfi_sites", 0, sizeof(unsigned int), idx);
	if (!sec)
		return -1;

	/* Second pass: zero each slot and attach its relocation. */
	idx = 0;
	for_each_sec(file, s) {
		if (!s->text)
			continue;

		list_for_each_entry(sym, &s->symbol_list, list) {
			if (sym->type != STT_FUNC)
				continue;

			if (strncmp(sym->name, "__cfi_", 6))
				continue;

			loc = (unsigned int *)sec->data->d_buf + idx;
			memset(loc, 0, sizeof(unsigned int));

			if (elf_add_reloc_to_insn(file->elf, sec,
						  idx * sizeof(unsigned int),
						  R_X86_64_PC32,
						  s, sym->offset))
				return -1;

			idx++;
		}
	}

	return 0;
}
936 
/*
 * Emit the __mcount_loc section: one address-sized entry per mcount call
 * site, each relocated (absolutely) to point at the call instruction.
 */
static int create_mcount_loc_sections(struct objtool_file *file)
{
	int addrsize = elf_class_addrsize(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	/* First pass: count entries to size the section. */
	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, addrsize, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addrsize;

	/*
	 * Second pass.  Note: unlike the sibling create_*_sections()
	 * functions, 'idx' here is a BYTE offset (advanced by addrsize),
	 * not an element index.
	 */
	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
		void *loc;

		loc = sec->data->d_buf + idx;
		memset(loc, 0, addrsize);

		if (elf_add_reloc_to_insn(file->elf, sec, idx,
					  addrsize == sizeof(u64) ? R_ABS64 : R_ABS32,
					  insn->sec, insn->offset))
			return -1;

		idx += addrsize;
	}

	return 0;
}
981 
982 static int create_direct_call_sections(struct objtool_file *file)
983 {
984 	struct instruction *insn;
985 	struct section *sec;
986 	unsigned int *loc;
987 	int idx;
988 
989 	sec = find_section_by_name(file->elf, ".call_sites");
990 	if (sec) {
991 		INIT_LIST_HEAD(&file->call_list);
992 		WARN("file already has .call_sites section, skipping");
993 		return 0;
994 	}
995 
996 	if (list_empty(&file->call_list))
997 		return 0;
998 
999 	idx = 0;
1000 	list_for_each_entry(insn, &file->call_list, call_node)
1001 		idx++;
1002 
1003 	sec = elf_create_section(file->elf, ".call_sites", 0, sizeof(unsigned int), idx);
1004 	if (!sec)
1005 		return -1;
1006 
1007 	idx = 0;
1008 	list_for_each_entry(insn, &file->call_list, call_node) {
1009 
1010 		loc = (unsigned int *)sec->data->d_buf + idx;
1011 		memset(loc, 0, sizeof(unsigned int));
1012 
1013 		if (elf_add_reloc_to_insn(file->elf, sec,
1014 					  idx * sizeof(unsigned int),
1015 					  R_X86_64_PC32,
1016 					  insn->sec, insn->offset))
1017 			return -1;
1018 
1019 		idx++;
1020 	}
1021 
1022 	return 0;
1023 }
1024 
1025 /*
1026  * Warnings shouldn't be reported for ignored functions.
1027  */
1028 static void add_ignores(struct objtool_file *file)
1029 {
1030 	struct instruction *insn;
1031 	struct section *sec;
1032 	struct symbol *func;
1033 	struct reloc *reloc;
1034 
1035 	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
1036 	if (!sec)
1037 		return;
1038 
1039 	list_for_each_entry(reloc, &sec->reloc_list, list) {
1040 		switch (reloc->sym->type) {
1041 		case STT_FUNC:
1042 			func = reloc->sym;
1043 			break;
1044 
1045 		case STT_SECTION:
1046 			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
1047 			if (!func)
1048 				continue;
1049 			break;
1050 
1051 		default:
1052 			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
1053 			continue;
1054 		}
1055 
1056 		func_for_each_insn(file, func, insn)
1057 			insn->ignore = true;
1058 	}
1059 }
1060 
1061 /*
1062  * This is a whitelist of functions that is allowed to be called with AC set.
1063  * The list is meant to be minimal and only contains compiler instrumentation
1064  * ABI and a few functions used to implement *_{to,from}_user() functions.
1065  *
1066  * These functions must not directly change AC, but may PUSHF/POPF.
1067  */
1068 static const char *uaccess_safe_builtin[] = {
1069 	/* KASAN */
1070 	"kasan_report",
1071 	"kasan_check_range",
1072 	/* KASAN out-of-line */
1073 	"__asan_loadN_noabort",
1074 	"__asan_load1_noabort",
1075 	"__asan_load2_noabort",
1076 	"__asan_load4_noabort",
1077 	"__asan_load8_noabort",
1078 	"__asan_load16_noabort",
1079 	"__asan_storeN_noabort",
1080 	"__asan_store1_noabort",
1081 	"__asan_store2_noabort",
1082 	"__asan_store4_noabort",
1083 	"__asan_store8_noabort",
1084 	"__asan_store16_noabort",
1085 	"__kasan_check_read",
1086 	"__kasan_check_write",
1087 	/* KASAN in-line */
1088 	"__asan_report_load_n_noabort",
1089 	"__asan_report_load1_noabort",
1090 	"__asan_report_load2_noabort",
1091 	"__asan_report_load4_noabort",
1092 	"__asan_report_load8_noabort",
1093 	"__asan_report_load16_noabort",
1094 	"__asan_report_store_n_noabort",
1095 	"__asan_report_store1_noabort",
1096 	"__asan_report_store2_noabort",
1097 	"__asan_report_store4_noabort",
1098 	"__asan_report_store8_noabort",
1099 	"__asan_report_store16_noabort",
1100 	/* KCSAN */
1101 	"__kcsan_check_access",
1102 	"__kcsan_mb",
1103 	"__kcsan_wmb",
1104 	"__kcsan_rmb",
1105 	"__kcsan_release",
1106 	"kcsan_found_watchpoint",
1107 	"kcsan_setup_watchpoint",
1108 	"kcsan_check_scoped_accesses",
1109 	"kcsan_disable_current",
1110 	"kcsan_enable_current_nowarn",
1111 	/* KCSAN/TSAN */
1112 	"__tsan_func_entry",
1113 	"__tsan_func_exit",
1114 	"__tsan_read_range",
1115 	"__tsan_write_range",
1116 	"__tsan_read1",
1117 	"__tsan_read2",
1118 	"__tsan_read4",
1119 	"__tsan_read8",
1120 	"__tsan_read16",
1121 	"__tsan_write1",
1122 	"__tsan_write2",
1123 	"__tsan_write4",
1124 	"__tsan_write8",
1125 	"__tsan_write16",
1126 	"__tsan_read_write1",
1127 	"__tsan_read_write2",
1128 	"__tsan_read_write4",
1129 	"__tsan_read_write8",
1130 	"__tsan_read_write16",
1131 	"__tsan_volatile_read1",
1132 	"__tsan_volatile_read2",
1133 	"__tsan_volatile_read4",
1134 	"__tsan_volatile_read8",
1135 	"__tsan_volatile_read16",
1136 	"__tsan_volatile_write1",
1137 	"__tsan_volatile_write2",
1138 	"__tsan_volatile_write4",
1139 	"__tsan_volatile_write8",
1140 	"__tsan_volatile_write16",
1141 	"__tsan_atomic8_load",
1142 	"__tsan_atomic16_load",
1143 	"__tsan_atomic32_load",
1144 	"__tsan_atomic64_load",
1145 	"__tsan_atomic8_store",
1146 	"__tsan_atomic16_store",
1147 	"__tsan_atomic32_store",
1148 	"__tsan_atomic64_store",
1149 	"__tsan_atomic8_exchange",
1150 	"__tsan_atomic16_exchange",
1151 	"__tsan_atomic32_exchange",
1152 	"__tsan_atomic64_exchange",
1153 	"__tsan_atomic8_fetch_add",
1154 	"__tsan_atomic16_fetch_add",
1155 	"__tsan_atomic32_fetch_add",
1156 	"__tsan_atomic64_fetch_add",
1157 	"__tsan_atomic8_fetch_sub",
1158 	"__tsan_atomic16_fetch_sub",
1159 	"__tsan_atomic32_fetch_sub",
1160 	"__tsan_atomic64_fetch_sub",
1161 	"__tsan_atomic8_fetch_and",
1162 	"__tsan_atomic16_fetch_and",
1163 	"__tsan_atomic32_fetch_and",
1164 	"__tsan_atomic64_fetch_and",
1165 	"__tsan_atomic8_fetch_or",
1166 	"__tsan_atomic16_fetch_or",
1167 	"__tsan_atomic32_fetch_or",
1168 	"__tsan_atomic64_fetch_or",
1169 	"__tsan_atomic8_fetch_xor",
1170 	"__tsan_atomic16_fetch_xor",
1171 	"__tsan_atomic32_fetch_xor",
1172 	"__tsan_atomic64_fetch_xor",
1173 	"__tsan_atomic8_fetch_nand",
1174 	"__tsan_atomic16_fetch_nand",
1175 	"__tsan_atomic32_fetch_nand",
1176 	"__tsan_atomic64_fetch_nand",
1177 	"__tsan_atomic8_compare_exchange_strong",
1178 	"__tsan_atomic16_compare_exchange_strong",
1179 	"__tsan_atomic32_compare_exchange_strong",
1180 	"__tsan_atomic64_compare_exchange_strong",
1181 	"__tsan_atomic8_compare_exchange_weak",
1182 	"__tsan_atomic16_compare_exchange_weak",
1183 	"__tsan_atomic32_compare_exchange_weak",
1184 	"__tsan_atomic64_compare_exchange_weak",
1185 	"__tsan_atomic8_compare_exchange_val",
1186 	"__tsan_atomic16_compare_exchange_val",
1187 	"__tsan_atomic32_compare_exchange_val",
1188 	"__tsan_atomic64_compare_exchange_val",
1189 	"__tsan_atomic_thread_fence",
1190 	"__tsan_atomic_signal_fence",
1191 	"__tsan_unaligned_read16",
1192 	"__tsan_unaligned_write16",
1193 	/* KCOV */
1194 	"write_comp_data",
1195 	"check_kcov_mode",
1196 	"__sanitizer_cov_trace_pc",
1197 	"__sanitizer_cov_trace_const_cmp1",
1198 	"__sanitizer_cov_trace_const_cmp2",
1199 	"__sanitizer_cov_trace_const_cmp4",
1200 	"__sanitizer_cov_trace_const_cmp8",
1201 	"__sanitizer_cov_trace_cmp1",
1202 	"__sanitizer_cov_trace_cmp2",
1203 	"__sanitizer_cov_trace_cmp4",
1204 	"__sanitizer_cov_trace_cmp8",
1205 	"__sanitizer_cov_trace_switch",
1206 	/* KMSAN */
1207 	"kmsan_copy_to_user",
1208 	"kmsan_report",
1209 	"kmsan_unpoison_entry_regs",
1210 	"kmsan_unpoison_memory",
1211 	"__msan_chain_origin",
1212 	"__msan_get_context_state",
1213 	"__msan_instrument_asm_store",
1214 	"__msan_metadata_ptr_for_load_1",
1215 	"__msan_metadata_ptr_for_load_2",
1216 	"__msan_metadata_ptr_for_load_4",
1217 	"__msan_metadata_ptr_for_load_8",
1218 	"__msan_metadata_ptr_for_load_n",
1219 	"__msan_metadata_ptr_for_store_1",
1220 	"__msan_metadata_ptr_for_store_2",
1221 	"__msan_metadata_ptr_for_store_4",
1222 	"__msan_metadata_ptr_for_store_8",
1223 	"__msan_metadata_ptr_for_store_n",
1224 	"__msan_poison_alloca",
1225 	"__msan_warning",
1226 	/* UBSAN */
1227 	"ubsan_type_mismatch_common",
1228 	"__ubsan_handle_type_mismatch",
1229 	"__ubsan_handle_type_mismatch_v1",
1230 	"__ubsan_handle_shift_out_of_bounds",
1231 	"__ubsan_handle_load_invalid_value",
1232 	/* misc */
1233 	"csum_partial_copy_generic",
1234 	"copy_mc_fragile",
1235 	"copy_mc_fragile_handle_tail",
1236 	"copy_mc_enhanced_fast_string",
1237 	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
1238 	"clear_user_erms",
1239 	"clear_user_rep_good",
1240 	"clear_user_original",
1241 	NULL
1242 };
1243 
1244 static void add_uaccess_safe(struct objtool_file *file)
1245 {
1246 	struct symbol *func;
1247 	const char **name;
1248 
1249 	if (!opts.uaccess)
1250 		return;
1251 
1252 	for (name = uaccess_safe_builtin; *name; name++) {
1253 		func = find_symbol_by_name(file->elf, *name);
1254 		if (!func)
1255 			continue;
1256 
1257 		func->uaccess_safe = true;
1258 	}
1259 }
1260 
1261 /*
1262  * FIXME: For now, just ignore any alternatives which add retpolines.  This is
1263  * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
1264  * But it at least allows objtool to understand the control flow *around* the
1265  * retpoline.
1266  */
1267 static int add_ignore_alternatives(struct objtool_file *file)
1268 {
1269 	struct section *sec;
1270 	struct reloc *reloc;
1271 	struct instruction *insn;
1272 
1273 	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
1274 	if (!sec)
1275 		return 0;
1276 
1277 	list_for_each_entry(reloc, &sec->reloc_list, list) {
1278 		if (reloc->sym->type != STT_SECTION) {
1279 			WARN("unexpected relocation symbol type in %s", sec->name);
1280 			return -1;
1281 		}
1282 
1283 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
1284 		if (!insn) {
1285 			WARN("bad .discard.ignore_alts entry");
1286 			return -1;
1287 		}
1288 
1289 		insn->ignore_alts = true;
1290 	}
1291 
1292 	return 0;
1293 }
1294 
/*
 * Weak default: report no retpoline symbols.  Overridden by architectures
 * that implement retpolines.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}
1299 
/*
 * Weak default: report no return-thunk symbols.  Overridden by architectures
 * that implement return thunks.
 */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}
1304 
/* Sentinel cached in insn->reloc to remember an unsuccessful lookup. */
#define NEGATIVE_RELOC	((void *)-1L)

/*
 * Return the relocation applied to @insn, or NULL if there is none.
 * The result is memoized in insn->reloc (with NEGATIVE_RELOC marking a
 * failed search) so the range lookup runs at most once per instruction.
 */
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		/* Without a file there is no ELF to search; don't cache. */
		if (!file)
			return NULL;

		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}
1326 
1327 static void remove_insn_ops(struct instruction *insn)
1328 {
1329 	struct stack_op *op, *tmp;
1330 
1331 	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
1332 		list_del(&op->list);
1333 		free(op);
1334 	}
1335 }
1336 
/*
 * Classify a call site (or sibling call, i.e. tail-call, when @sibling) and
 * queue it on the appropriate per-file list: static calls, retpoline calls,
 * mcount locations or regular calls.  For noinstr/mcount hacks, the call
 * instruction may be rewritten in place in the ELF file.
 */
static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn->call_dest;

	/*
	 * NOTE(review): when insn has no call_dest, the destination symbol is
	 * taken from the relocation -- this assumes a reloc exists in that
	 * case (reloc can be NULL here); confirm against callers.
	 */
	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction. For now, don't
	 * annotate it. (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		/* Kill the relocation first so the patched bytes stay put. */
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		/* Tail-calls become a RET, regular calls become a NOP. */
		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
			               : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);
		if (opts.mnop) {
			/* --mnop: rewrite the __fentry__ call to a NOP. */
			if (reloc) {
				reloc->type = R_NONE;
				elf_write_reloc(file->elf, reloc);
			}

			elf_write_insn(file->elf, insn->sec,
				       insn->offset, insn->len,
				       arch_nop_insn(insn->len));

			insn->type = INSN_NOP;
		}

		/* Record the site for the __mcount_loc section. */
		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}

	if (insn->type == INSN_CALL && !insn->sec->init)
		list_add_tail(&insn->call_node, &file->call_list);

	/* A non-sibling call to a function which never returns ends the branch. */
	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;
}
1422 
1423 static void add_call_dest(struct objtool_file *file, struct instruction *insn,
1424 			  struct symbol *dest, bool sibling)
1425 {
1426 	insn->call_dest = dest;
1427 	if (!dest)
1428 		return;
1429 
1430 	/*
1431 	 * Whatever stack impact regular CALLs have, should be undone
1432 	 * by the RETURN of the called function.
1433 	 *
1434 	 * Annotated intra-function calls retain the stack_ops but
1435 	 * are converted to JUMP, see read_intra_function_calls().
1436 	 */
1437 	remove_insn_ops(insn);
1438 
1439 	annotate_call_site(file, insn, sibling);
1440 }
1441 
1442 static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1443 {
1444 	/*
1445 	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1446 	 * so convert them accordingly.
1447 	 */
1448 	switch (insn->type) {
1449 	case INSN_CALL:
1450 		insn->type = INSN_CALL_DYNAMIC;
1451 		break;
1452 	case INSN_JUMP_UNCONDITIONAL:
1453 		insn->type = INSN_JUMP_DYNAMIC;
1454 		break;
1455 	case INSN_JUMP_CONDITIONAL:
1456 		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1457 		break;
1458 	default:
1459 		return;
1460 	}
1461 
1462 	insn->retpoline_safe = true;
1463 
1464 	/*
1465 	 * Whatever stack impact regular CALLs have, should be undone
1466 	 * by the RETURN of the called function.
1467 	 *
1468 	 * Annotated intra-function calls retain the stack_ops but
1469 	 * are converted to JUMP, see read_intra_function_calls().
1470 	 */
1471 	remove_insn_ops(insn);
1472 
1473 	annotate_call_site(file, insn, false);
1474 }
1475 
1476 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
1477 {
1478 	/*
1479 	 * Return thunk tail calls are really just returns in disguise,
1480 	 * so convert them accordingly.
1481 	 */
1482 	insn->type = INSN_RETURN;
1483 	insn->retpoline_safe = true;
1484 
1485 	if (add)
1486 		list_add_tail(&insn->call_node, &file->return_thunk_list);
1487 }
1488 
1489 static bool is_first_func_insn(struct objtool_file *file,
1490 			       struct instruction *insn, struct symbol *sym)
1491 {
1492 	if (insn->offset == sym->offset)
1493 		return true;
1494 
1495 	/* Allow direct CALL/JMP past ENDBR */
1496 	if (opts.ibt) {
1497 		struct instruction *prev = prev_insn_same_sym(file, insn);
1498 
1499 		if (prev && prev->type == INSN_ENDBR &&
1500 		    insn->offset == sym->offset + prev->len)
1501 			return true;
1502 	}
1503 
1504 	return false;
1505 }
1506 
1507 /*
1508  * A sibling call is a tail-call to another symbol -- to differentiate from a
1509  * recursive tail-call which is to the same symbol.
1510  */
1511 static bool jump_is_sibling_call(struct objtool_file *file,
1512 				 struct instruction *from, struct instruction *to)
1513 {
1514 	struct symbol *fs = from->sym;
1515 	struct symbol *ts = to->sym;
1516 
1517 	/* Not a sibling call if from/to a symbol hole */
1518 	if (!fs || !ts)
1519 		return false;
1520 
1521 	/* Not a sibling call if not targeting the start of a symbol. */
1522 	if (!is_first_func_insn(file, to, ts))
1523 		return false;
1524 
1525 	/* Disallow sibling calls into STT_NOTYPE */
1526 	if (ts->type == STT_NOTYPE)
1527 		return false;
1528 
1529 	/* Must not be self to be a sibling */
1530 	return fs->pfunc != ts->pfunc;
1531 }
1532 
1533 /*
1534  * Find the destination instructions for all jumps.
1535  */
1536 static int add_jump_destinations(struct objtool_file *file)
1537 {
1538 	struct instruction *insn, *jump_dest;
1539 	struct reloc *reloc;
1540 	struct section *dest_sec;
1541 	unsigned long dest_off;
1542 
1543 	for_each_insn(file, insn) {
1544 		if (insn->jump_dest) {
1545 			/*
1546 			 * handle_group_alt() may have previously set
1547 			 * 'jump_dest' for some alternatives.
1548 			 */
1549 			continue;
1550 		}
1551 		if (!is_static_jump(insn))
1552 			continue;
1553 
1554 		reloc = insn_reloc(file, insn);
1555 		if (!reloc) {
1556 			dest_sec = insn->sec;
1557 			dest_off = arch_jump_destination(insn);
1558 		} else if (reloc->sym->type == STT_SECTION) {
1559 			dest_sec = reloc->sym->sec;
1560 			dest_off = arch_dest_reloc_offset(reloc->addend);
1561 		} else if (reloc->sym->retpoline_thunk) {
1562 			add_retpoline_call(file, insn);
1563 			continue;
1564 		} else if (reloc->sym->return_thunk) {
1565 			add_return_call(file, insn, true);
1566 			continue;
1567 		} else if (insn_func(insn)) {
1568 			/*
1569 			 * External sibling call or internal sibling call with
1570 			 * STT_FUNC reloc.
1571 			 */
1572 			add_call_dest(file, insn, reloc->sym, true);
1573 			continue;
1574 		} else if (reloc->sym->sec->idx) {
1575 			dest_sec = reloc->sym->sec;
1576 			dest_off = reloc->sym->sym.st_value +
1577 				   arch_dest_reloc_offset(reloc->addend);
1578 		} else {
1579 			/* non-func asm code jumping to another file */
1580 			continue;
1581 		}
1582 
1583 		jump_dest = find_insn(file, dest_sec, dest_off);
1584 		if (!jump_dest) {
1585 			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
1586 
1587 			/*
1588 			 * This is a special case for zen_untrain_ret().
1589 			 * It jumps to __x86_return_thunk(), but objtool
1590 			 * can't find the thunk's starting RET
1591 			 * instruction, because the RET is also in the
1592 			 * middle of another instruction.  Objtool only
1593 			 * knows about the outer instruction.
1594 			 */
1595 			if (sym && sym->return_thunk) {
1596 				add_return_call(file, insn, false);
1597 				continue;
1598 			}
1599 
1600 			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
1601 				  insn->sec, insn->offset, dest_sec->name,
1602 				  dest_off);
1603 			return -1;
1604 		}
1605 
1606 		/*
1607 		 * Cross-function jump.
1608 		 */
1609 		if (insn_func(insn) && insn_func(jump_dest) &&
1610 		    insn_func(insn) != insn_func(jump_dest)) {
1611 
1612 			/*
1613 			 * For GCC 8+, create parent/child links for any cold
1614 			 * subfunctions.  This is _mostly_ redundant with a
1615 			 * similar initialization in read_symbols().
1616 			 *
1617 			 * If a function has aliases, we want the *first* such
1618 			 * function in the symbol table to be the subfunction's
1619 			 * parent.  In that case we overwrite the
1620 			 * initialization done in read_symbols().
1621 			 *
1622 			 * However this code can't completely replace the
1623 			 * read_symbols() code because this doesn't detect the
1624 			 * case where the parent function's only reference to a
1625 			 * subfunction is through a jump table.
1626 			 */
1627 			if (!strstr(insn_func(insn)->name, ".cold") &&
1628 			    strstr(insn_func(jump_dest)->name, ".cold")) {
1629 				insn_func(insn)->cfunc = insn_func(jump_dest);
1630 				insn_func(jump_dest)->pfunc = insn_func(insn);
1631 			}
1632 		}
1633 
1634 		if (jump_is_sibling_call(file, insn, jump_dest)) {
1635 			/*
1636 			 * Internal sibling call without reloc or with
1637 			 * STT_SECTION reloc.
1638 			 */
1639 			add_call_dest(file, insn, insn_func(jump_dest), true);
1640 			continue;
1641 		}
1642 
1643 		insn->jump_dest = jump_dest;
1644 	}
1645 
1646 	return 0;
1647 }
1648 
/*
 * Resolve the symbol at (@sec, @offset), preferring an STT_FUNC symbol
 * over any other symbol kind covering the same offset.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *sym = find_func_by_offset(sec, offset);

	if (sym)
		return sym;

	return find_symbol_by_offset(sec, offset);
}
1659 
1660 /*
1661  * Find the destination instructions for all calls.
1662  */
1663 static int add_call_destinations(struct objtool_file *file)
1664 {
1665 	struct instruction *insn;
1666 	unsigned long dest_off;
1667 	struct symbol *dest;
1668 	struct reloc *reloc;
1669 
1670 	for_each_insn(file, insn) {
1671 		if (insn->type != INSN_CALL)
1672 			continue;
1673 
1674 		reloc = insn_reloc(file, insn);
1675 		if (!reloc) {
1676 			dest_off = arch_jump_destination(insn);
1677 			dest = find_call_destination(insn->sec, dest_off);
1678 
1679 			add_call_dest(file, insn, dest, false);
1680 
1681 			if (insn->ignore)
1682 				continue;
1683 
1684 			if (!insn->call_dest) {
1685 				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
1686 				return -1;
1687 			}
1688 
1689 			if (insn_func(insn) && insn->call_dest->type != STT_FUNC) {
1690 				WARN_FUNC("unsupported call to non-function",
1691 					  insn->sec, insn->offset);
1692 				return -1;
1693 			}
1694 
1695 		} else if (reloc->sym->type == STT_SECTION) {
1696 			dest_off = arch_dest_reloc_offset(reloc->addend);
1697 			dest = find_call_destination(reloc->sym->sec, dest_off);
1698 			if (!dest) {
1699 				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
1700 					  insn->sec, insn->offset,
1701 					  reloc->sym->sec->name,
1702 					  dest_off);
1703 				return -1;
1704 			}
1705 
1706 			add_call_dest(file, insn, dest, false);
1707 
1708 		} else if (reloc->sym->retpoline_thunk) {
1709 			add_retpoline_call(file, insn);
1710 
1711 		} else
1712 			add_call_dest(file, insn, reloc->sym, false);
1713 	}
1714 
1715 	return 0;
1716 }
1717 
1718 /*
1719  * The .alternatives section requires some extra special care over and above
1720  * other special sections because alternatives are patched in place.
1721  */
1722 static int handle_group_alt(struct objtool_file *file,
1723 			    struct special_alt *special_alt,
1724 			    struct instruction *orig_insn,
1725 			    struct instruction **new_insn)
1726 {
1727 	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
1728 	struct alt_group *orig_alt_group, *new_alt_group;
1729 	unsigned long dest_off;
1730 
1731 
1732 	orig_alt_group = malloc(sizeof(*orig_alt_group));
1733 	if (!orig_alt_group) {
1734 		WARN("malloc failed");
1735 		return -1;
1736 	}
1737 	orig_alt_group->cfi = calloc(special_alt->orig_len,
1738 				     sizeof(struct cfi_state *));
1739 	if (!orig_alt_group->cfi) {
1740 		WARN("calloc failed");
1741 		return -1;
1742 	}
1743 
1744 	last_orig_insn = NULL;
1745 	insn = orig_insn;
1746 	sec_for_each_insn_from(file, insn) {
1747 		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1748 			break;
1749 
1750 		insn->alt_group = orig_alt_group;
1751 		last_orig_insn = insn;
1752 	}
1753 	orig_alt_group->orig_group = NULL;
1754 	orig_alt_group->first_insn = orig_insn;
1755 	orig_alt_group->last_insn = last_orig_insn;
1756 
1757 
1758 	new_alt_group = malloc(sizeof(*new_alt_group));
1759 	if (!new_alt_group) {
1760 		WARN("malloc failed");
1761 		return -1;
1762 	}
1763 
1764 	if (special_alt->new_len < special_alt->orig_len) {
1765 		/*
1766 		 * Insert a fake nop at the end to make the replacement
1767 		 * alt_group the same size as the original.  This is needed to
1768 		 * allow propagate_alt_cfi() to do its magic.  When the last
1769 		 * instruction affects the stack, the instruction after it (the
1770 		 * nop) will propagate the new state to the shared CFI array.
1771 		 */
1772 		nop = malloc(sizeof(*nop));
1773 		if (!nop) {
1774 			WARN("malloc failed");
1775 			return -1;
1776 		}
1777 		memset(nop, 0, sizeof(*nop));
1778 		INIT_LIST_HEAD(&nop->alts);
1779 		INIT_LIST_HEAD(&nop->stack_ops);
1780 
1781 		nop->sec = special_alt->new_sec;
1782 		nop->offset = special_alt->new_off + special_alt->new_len;
1783 		nop->len = special_alt->orig_len - special_alt->new_len;
1784 		nop->type = INSN_NOP;
1785 		nop->sym = orig_insn->sym;
1786 		nop->alt_group = new_alt_group;
1787 		nop->ignore = orig_insn->ignore_alts;
1788 	}
1789 
1790 	if (!special_alt->new_len) {
1791 		*new_insn = nop;
1792 		goto end;
1793 	}
1794 
1795 	insn = *new_insn;
1796 	sec_for_each_insn_from(file, insn) {
1797 		struct reloc *alt_reloc;
1798 
1799 		if (insn->offset >= special_alt->new_off + special_alt->new_len)
1800 			break;
1801 
1802 		last_new_insn = insn;
1803 
1804 		insn->ignore = orig_insn->ignore_alts;
1805 		insn->sym = orig_insn->sym;
1806 		insn->alt_group = new_alt_group;
1807 
1808 		/*
1809 		 * Since alternative replacement code is copy/pasted by the
1810 		 * kernel after applying relocations, generally such code can't
1811 		 * have relative-address relocation references to outside the
1812 		 * .altinstr_replacement section, unless the arch's
1813 		 * alternatives code can adjust the relative offsets
1814 		 * accordingly.
1815 		 */
1816 		alt_reloc = insn_reloc(file, insn);
1817 		if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1818 		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1819 
1820 			WARN_FUNC("unsupported relocation in alternatives section",
1821 				  insn->sec, insn->offset);
1822 			return -1;
1823 		}
1824 
1825 		if (!is_static_jump(insn))
1826 			continue;
1827 
1828 		if (!insn->immediate)
1829 			continue;
1830 
1831 		dest_off = arch_jump_destination(insn);
1832 		if (dest_off == special_alt->new_off + special_alt->new_len) {
1833 			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);
1834 			if (!insn->jump_dest) {
1835 				WARN_FUNC("can't find alternative jump destination",
1836 					  insn->sec, insn->offset);
1837 				return -1;
1838 			}
1839 		}
1840 	}
1841 
1842 	if (!last_new_insn) {
1843 		WARN_FUNC("can't find last new alternative instruction",
1844 			  special_alt->new_sec, special_alt->new_off);
1845 		return -1;
1846 	}
1847 
1848 	if (nop)
1849 		list_add(&nop->list, &last_new_insn->list);
1850 end:
1851 	new_alt_group->orig_group = orig_alt_group;
1852 	new_alt_group->first_insn = *new_insn;
1853 	new_alt_group->last_insn = nop ? : last_new_insn;
1854 	new_alt_group->cfi = orig_alt_group->cfi;
1855 	return 0;
1856 }
1857 
1858 /*
1859  * A jump table entry can either convert a nop to a jump or a jump to a nop.
1860  * If the original instruction is a jump, make the alt entry an effective nop
1861  * by just skipping the original instruction.
1862  */
1863 static int handle_jump_alt(struct objtool_file *file,
1864 			   struct special_alt *special_alt,
1865 			   struct instruction *orig_insn,
1866 			   struct instruction **new_insn)
1867 {
1868 	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1869 	    orig_insn->type != INSN_NOP) {
1870 
1871 		WARN_FUNC("unsupported instruction at jump label",
1872 			  orig_insn->sec, orig_insn->offset);
1873 		return -1;
1874 	}
1875 
1876 	if (opts.hack_jump_label && special_alt->key_addend & 2) {
1877 		struct reloc *reloc = insn_reloc(file, orig_insn);
1878 
1879 		if (reloc) {
1880 			reloc->type = R_NONE;
1881 			elf_write_reloc(file->elf, reloc);
1882 		}
1883 		elf_write_insn(file->elf, orig_insn->sec,
1884 			       orig_insn->offset, orig_insn->len,
1885 			       arch_nop_insn(orig_insn->len));
1886 		orig_insn->type = INSN_NOP;
1887 	}
1888 
1889 	if (orig_insn->type == INSN_NOP) {
1890 		if (orig_insn->len == 2)
1891 			file->jl_nop_short++;
1892 		else
1893 			file->jl_nop_long++;
1894 
1895 		return 0;
1896 	}
1897 
1898 	if (orig_insn->len == 2)
1899 		file->jl_short++;
1900 	else
1901 		file->jl_long++;
1902 
1903 	*new_insn = list_next_entry(orig_insn, list);
1904 	return 0;
1905 }
1906 
1907 /*
1908  * Read all the special sections which have alternate instructions which can be
1909  * patched in or redirected to at runtime.  Each instruction having alternate
1910  * instruction(s) has them added to its insn->alts list, which will be
1911  * traversed in validate_branch().
1912  */
1913 static int add_special_section_alts(struct objtool_file *file)
1914 {
1915 	struct list_head special_alts;
1916 	struct instruction *orig_insn, *new_insn;
1917 	struct special_alt *special_alt, *tmp;
1918 	struct alternative *alt;
1919 	int ret;
1920 
1921 	ret = special_get_alts(file->elf, &special_alts);
1922 	if (ret)
1923 		return ret;
1924 
1925 	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
1926 
1927 		orig_insn = find_insn(file, special_alt->orig_sec,
1928 				      special_alt->orig_off);
1929 		if (!orig_insn) {
1930 			WARN_FUNC("special: can't find orig instruction",
1931 				  special_alt->orig_sec, special_alt->orig_off);
1932 			ret = -1;
1933 			goto out;
1934 		}
1935 
1936 		new_insn = NULL;
1937 		if (!special_alt->group || special_alt->new_len) {
1938 			new_insn = find_insn(file, special_alt->new_sec,
1939 					     special_alt->new_off);
1940 			if (!new_insn) {
1941 				WARN_FUNC("special: can't find new instruction",
1942 					  special_alt->new_sec,
1943 					  special_alt->new_off);
1944 				ret = -1;
1945 				goto out;
1946 			}
1947 		}
1948 
1949 		if (special_alt->group) {
1950 			if (!special_alt->orig_len) {
1951 				WARN_FUNC("empty alternative entry",
1952 					  orig_insn->sec, orig_insn->offset);
1953 				continue;
1954 			}
1955 
1956 			ret = handle_group_alt(file, special_alt, orig_insn,
1957 					       &new_insn);
1958 			if (ret)
1959 				goto out;
1960 		} else if (special_alt->jump_or_nop) {
1961 			ret = handle_jump_alt(file, special_alt, orig_insn,
1962 					      &new_insn);
1963 			if (ret)
1964 				goto out;
1965 		}
1966 
1967 		alt = malloc(sizeof(*alt));
1968 		if (!alt) {
1969 			WARN("malloc failed");
1970 			ret = -1;
1971 			goto out;
1972 		}
1973 
1974 		alt->insn = new_insn;
1975 		alt->skip_orig = special_alt->skip_orig;
1976 		orig_insn->ignore_alts |= special_alt->skip_alt;
1977 		list_add_tail(&alt->list, &orig_insn->alts);
1978 
1979 		list_del(&special_alt->list);
1980 		free(special_alt);
1981 	}
1982 
1983 	if (opts.stats) {
1984 		printf("jl\\\tNOP\tJMP\n");
1985 		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
1986 		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
1987 	}
1988 
1989 out:
1990 	return ret;
1991 }
1992 
/*
 * Walk a switch jump table starting at relocation @table and add every
 * target instruction as an alternative of the dynamic jump @insn.  The
 * walk stops at the start of the next table, at a gap in entry offsets,
 * or when a target leaves the current (parent) function.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			    struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn_func(insn)->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/*
		 * Make sure the table entries are consecutive.
		 * NOTE(review): entry stride is hard-coded to 8 bytes --
		 * assumes 64-bit pointer-sized entries.
		 */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	/* prev_offset == 0 means not even one valid entry was found. */
	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
2048 
2049 /*
2050  * find_jump_table() - Given a dynamic jump, find the switch jump table
2051  * associated with it.
2052  */
2053 static struct reloc *find_jump_table(struct objtool_file *file,
2054 				      struct symbol *func,
2055 				      struct instruction *insn)
2056 {
2057 	struct reloc *table_reloc;
2058 	struct instruction *dest_insn, *orig_insn = insn;
2059 
2060 	/*
2061 	 * Backward search using the @first_jump_src links, these help avoid
2062 	 * much of the 'in between' code. Which avoids us getting confused by
2063 	 * it.
2064 	 */
2065 	for (;
2066 	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2067 	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2068 
2069 		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2070 			break;
2071 
2072 		/* allow small jumps within the range */
2073 		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2074 		    insn->jump_dest &&
2075 		    (insn->jump_dest->offset <= insn->offset ||
2076 		     insn->jump_dest->offset > orig_insn->offset))
2077 		    break;
2078 
2079 		table_reloc = arch_find_switch_table(file, insn);
2080 		if (!table_reloc)
2081 			continue;
2082 		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
2083 		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2084 			continue;
2085 
2086 		return table_reloc;
2087 	}
2088 
2089 	return NULL;
2090 }
2091 
2092 /*
2093  * First pass: Mark the head of each jump table so that in the next pass,
2094  * we know when a given jump table ends and the next one starts.
2095  */
2096 static void mark_func_jump_tables(struct objtool_file *file,
2097 				    struct symbol *func)
2098 {
2099 	struct instruction *insn, *last = NULL;
2100 	struct reloc *reloc;
2101 
2102 	func_for_each_insn(file, func, insn) {
2103 		if (!last)
2104 			last = insn;
2105 
2106 		/*
2107 		 * Store back-pointers for unconditional forward jumps such
2108 		 * that find_jump_table() can back-track using those and
2109 		 * avoid some potentially confusing code.
2110 		 */
2111 		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
2112 		    insn->offset > last->offset &&
2113 		    insn->jump_dest->offset > insn->offset &&
2114 		    !insn->jump_dest->first_jump_src) {
2115 
2116 			insn->jump_dest->first_jump_src = insn;
2117 			last = insn->jump_dest;
2118 		}
2119 
2120 		if (insn->type != INSN_JUMP_DYNAMIC)
2121 			continue;
2122 
2123 		reloc = find_jump_table(file, func, insn);
2124 		if (reloc) {
2125 			reloc->jump_table_start = true;
2126 			insn->jump_table = reloc;
2127 		}
2128 	}
2129 }
2130 
2131 static int add_func_jump_tables(struct objtool_file *file,
2132 				  struct symbol *func)
2133 {
2134 	struct instruction *insn;
2135 	int ret;
2136 
2137 	func_for_each_insn(file, func, insn) {
2138 		if (!insn->jump_table)
2139 			continue;
2140 
2141 		ret = add_jump_table(file, insn, insn->jump_table);
2142 		if (ret)
2143 			return ret;
2144 	}
2145 
2146 	return 0;
2147 }
2148 
2149 /*
2150  * For some switch statements, gcc generates a jump table in the .rodata
2151  * section which contains a list of addresses within the function to jump to.
2152  * This finds these jump tables and adds them to the insn->alts lists.
2153  */
2154 static int add_jump_table_alts(struct objtool_file *file)
2155 {
2156 	struct section *sec;
2157 	struct symbol *func;
2158 	int ret;
2159 
2160 	if (!file->rodata)
2161 		return 0;
2162 
2163 	for_each_sec(file, sec) {
2164 		list_for_each_entry(func, &sec->symbol_list, list) {
2165 			if (func->type != STT_FUNC)
2166 				continue;
2167 
2168 			mark_func_jump_tables(file, func);
2169 			ret = add_func_jump_tables(file, func);
2170 			if (ret)
2171 				return ret;
2172 		}
2173 	}
2174 
2175 	return 0;
2176 }
2177 
/*
 * Initialize @state to the default CFI state at function entry: the CFA
 * and callee-saved register locations copied from initial_func_cfi.
 */
static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	/* Stack depth at entry equals the initial CFA offset. */
	state->stack_size = initial_func_cfi.cfa.offset;
}
2185 
/*
 * Parse the .discard.unwind_hints section and attach the encoded CFI state
 * (or save/restore/entry markers) to each hinted instruction.  Resulting
 * CFI states are deduplicated through the cfi hash.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec, *relocsec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* The section must be an exact array of struct unwind_hint. */
	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* Each hint has a reloc locating the hinted instruction. */
		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		/* SAVE marks a point to snapshot state, not a hint itself. */
		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && sym->bind == STB_GLOBAL) {
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
						  insn->sec, insn->offset);
				}

				/* Global entry point (e.g. exception entry). */
				insn->entry = 1;
			}
		}

		/* ENTRY hints behave like CALL hints plus the entry mark. */
		if (hint->type == UNWIND_HINT_TYPE_ENTRY) {
			hint->type = UNWIND_HINT_TYPE_CALL;
			insn->entry = 1;
		}

		/* FUNC hints use the shared default function CFI state. */
		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from any CFI already attached to this instruction. */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.end = hint->end;

		/* Deduplicate: share identical CFI states via the hash. */
		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
2281 
2282 static int read_noendbr_hints(struct objtool_file *file)
2283 {
2284 	struct section *sec;
2285 	struct instruction *insn;
2286 	struct reloc *reloc;
2287 
2288 	sec = find_section_by_name(file->elf, ".rela.discard.noendbr");
2289 	if (!sec)
2290 		return 0;
2291 
2292 	list_for_each_entry(reloc, &sec->reloc_list, list) {
2293 		insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
2294 		if (!insn) {
2295 			WARN("bad .discard.noendbr entry");
2296 			return -1;
2297 		}
2298 
2299 		insn->noendbr = 1;
2300 	}
2301 
2302 	return 0;
2303 }
2304 
2305 static int read_retpoline_hints(struct objtool_file *file)
2306 {
2307 	struct section *sec;
2308 	struct instruction *insn;
2309 	struct reloc *reloc;
2310 
2311 	sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
2312 	if (!sec)
2313 		return 0;
2314 
2315 	list_for_each_entry(reloc, &sec->reloc_list, list) {
2316 		if (reloc->sym->type != STT_SECTION) {
2317 			WARN("unexpected relocation symbol type in %s", sec->name);
2318 			return -1;
2319 		}
2320 
2321 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
2322 		if (!insn) {
2323 			WARN("bad .discard.retpoline_safe entry");
2324 			return -1;
2325 		}
2326 
2327 		if (insn->type != INSN_JUMP_DYNAMIC &&
2328 		    insn->type != INSN_CALL_DYNAMIC &&
2329 		    insn->type != INSN_RETURN &&
2330 		    insn->type != INSN_NOP) {
2331 			WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
2332 				  insn->sec, insn->offset);
2333 			return -1;
2334 		}
2335 
2336 		insn->retpoline_safe = true;
2337 	}
2338 
2339 	return 0;
2340 }
2341 
2342 static int read_instr_hints(struct objtool_file *file)
2343 {
2344 	struct section *sec;
2345 	struct instruction *insn;
2346 	struct reloc *reloc;
2347 
2348 	sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
2349 	if (!sec)
2350 		return 0;
2351 
2352 	list_for_each_entry(reloc, &sec->reloc_list, list) {
2353 		if (reloc->sym->type != STT_SECTION) {
2354 			WARN("unexpected relocation symbol type in %s", sec->name);
2355 			return -1;
2356 		}
2357 
2358 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
2359 		if (!insn) {
2360 			WARN("bad .discard.instr_end entry");
2361 			return -1;
2362 		}
2363 
2364 		insn->instr--;
2365 	}
2366 
2367 	sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
2368 	if (!sec)
2369 		return 0;
2370 
2371 	list_for_each_entry(reloc, &sec->reloc_list, list) {
2372 		if (reloc->sym->type != STT_SECTION) {
2373 			WARN("unexpected relocation symbol type in %s", sec->name);
2374 			return -1;
2375 		}
2376 
2377 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
2378 		if (!insn) {
2379 			WARN("bad .discard.instr_begin entry");
2380 			return -1;
2381 		}
2382 
2383 		insn->instr++;
2384 	}
2385 
2386 	return 0;
2387 }
2388 
2389 static int read_intra_function_calls(struct objtool_file *file)
2390 {
2391 	struct instruction *insn;
2392 	struct section *sec;
2393 	struct reloc *reloc;
2394 
2395 	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
2396 	if (!sec)
2397 		return 0;
2398 
2399 	list_for_each_entry(reloc, &sec->reloc_list, list) {
2400 		unsigned long dest_off;
2401 
2402 		if (reloc->sym->type != STT_SECTION) {
2403 			WARN("unexpected relocation symbol type in %s",
2404 			     sec->name);
2405 			return -1;
2406 		}
2407 
2408 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
2409 		if (!insn) {
2410 			WARN("bad .discard.intra_function_call entry");
2411 			return -1;
2412 		}
2413 
2414 		if (insn->type != INSN_CALL) {
2415 			WARN_FUNC("intra_function_call not a direct call",
2416 				  insn->sec, insn->offset);
2417 			return -1;
2418 		}
2419 
2420 		/*
2421 		 * Treat intra-function CALLs as JMPs, but with a stack_op.
2422 		 * See add_call_destinations(), which strips stack_ops from
2423 		 * normal CALLs.
2424 		 */
2425 		insn->type = INSN_JUMP_UNCONDITIONAL;
2426 
2427 		dest_off = arch_jump_destination(insn);
2428 		insn->jump_dest = find_insn(file, insn->sec, dest_off);
2429 		if (!insn->jump_dest) {
2430 			WARN_FUNC("can't find call dest at %s+0x%lx",
2431 				  insn->sec, insn->offset,
2432 				  insn->sec->name, dest_off);
2433 			return -1;
2434 		}
2435 	}
2436 
2437 	return 0;
2438 }
2439 
/*
 * Return true if name matches an instrumentation function, where calls to that
 * function from noinstr code can safely be removed, but compilers won't do so.
 */
static bool is_profiling_func(const char *name)
{
	/*
	 * Many compilers cannot disable KCOV with a function attribute.
	 *
	 * Prefix lengths are derived from the literals via sizeof() - 1 so
	 * they can never silently desync from the strings themselves.
	 */
	if (!strncmp(name, "__sanitizer_cov_", sizeof("__sanitizer_cov_") - 1))
		return true;

	/*
	 * Some compilers currently do not remove __tsan_func_entry/exit nor
	 * __tsan_atomic_signal_fence (used for barrier instrumentation) with
	 * the __no_sanitize_thread attribute, remove them. Once the kernel's
	 * minimum Clang version is 14.0, this can be removed.
	 */
	if (!strncmp(name, "__tsan_func_", sizeof("__tsan_func_") - 1) ||
	    !strcmp(name, "__tsan_atomic_signal_fence"))
		return true;

	return false;
}
2464 
2465 static int classify_symbols(struct objtool_file *file)
2466 {
2467 	struct section *sec;
2468 	struct symbol *func;
2469 
2470 	for_each_sec(file, sec) {
2471 		list_for_each_entry(func, &sec->symbol_list, list) {
2472 			if (func->bind != STB_GLOBAL)
2473 				continue;
2474 
2475 			if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2476 				     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2477 				func->static_call_tramp = true;
2478 
2479 			if (arch_is_retpoline(func))
2480 				func->retpoline_thunk = true;
2481 
2482 			if (arch_is_rethunk(func))
2483 				func->return_thunk = true;
2484 
2485 			if (arch_ftrace_match(func->name))
2486 				func->fentry = true;
2487 
2488 			if (is_profiling_func(func->name))
2489 				func->profiling_func = true;
2490 		}
2491 	}
2492 
2493 	return 0;
2494 }
2495 
2496 static void mark_rodata(struct objtool_file *file)
2497 {
2498 	struct section *sec;
2499 	bool found = false;
2500 
2501 	/*
2502 	 * Search for the following rodata sections, each of which can
2503 	 * potentially contain jump tables:
2504 	 *
2505 	 * - .rodata: can contain GCC switch tables
2506 	 * - .rodata.<func>: same, if -fdata-sections is being used
2507 	 * - .rodata..c_jump_table: contains C annotated jump tables
2508 	 *
2509 	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2510 	 */
2511 	for_each_sec(file, sec) {
2512 		if (!strncmp(sec->name, ".rodata", 7) &&
2513 		    !strstr(sec->name, ".str1.")) {
2514 			sec->rodata = true;
2515 			found = true;
2516 		}
2517 	}
2518 
2519 	file->rodata = found;
2520 }
2521 
/*
 * Run all the decode/annotation passes over the object file.  The order of
 * the passes is significant; the inline comments document the inter-pass
 * dependencies.  Returns 0 on success, or the first pass's error code.
 */
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = init_pv_ops(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	ret = decode_instructions(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	/*
	 * Must be before read_unwind_hints() since that needs insn->noendbr.
	 */
	ret = read_noendbr_hints(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
		ret = add_special_section_alts(file);
		if (ret)
			return ret;
	}

	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	ret = add_dead_ends(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	return 0;
}
2609 
2610 static bool is_fentry_call(struct instruction *insn)
2611 {
2612 	if (insn->type == INSN_CALL &&
2613 	    insn->call_dest &&
2614 	    insn->call_dest->fentry)
2615 		return true;
2616 
2617 	return false;
2618 }
2619 
2620 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2621 {
2622 	struct cfi_state *cfi = &state->cfi;
2623 	int i;
2624 
2625 	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2626 		return true;
2627 
2628 	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2629 		return true;
2630 
2631 	if (cfi->stack_size != initial_func_cfi.cfa.offset)
2632 		return true;
2633 
2634 	for (i = 0; i < CFI_NUM_REGS; i++) {
2635 		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2636 		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2637 			return true;
2638 	}
2639 
2640 	return false;
2641 }
2642 
2643 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2644 				int expected_offset)
2645 {
2646 	return reg->base == CFI_CFA &&
2647 	       reg->offset == expected_offset;
2648 }
2649 
2650 static bool has_valid_stack_frame(struct insn_state *state)
2651 {
2652 	struct cfi_state *cfi = &state->cfi;
2653 
2654 	if (cfi->cfa.base == CFI_BP &&
2655 	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2656 	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2657 		return true;
2658 
2659 	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2660 		return true;
2661 
2662 	return false;
2663 }
2664 
2665 static int update_cfi_state_regs(struct instruction *insn,
2666 				  struct cfi_state *cfi,
2667 				  struct stack_op *op)
2668 {
2669 	struct cfi_reg *cfa = &cfi->cfa;
2670 
2671 	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2672 		return 0;
2673 
2674 	/* push */
2675 	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2676 		cfa->offset += 8;
2677 
2678 	/* pop */
2679 	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2680 		cfa->offset -= 8;
2681 
2682 	/* add immediate to sp */
2683 	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2684 	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2685 		cfa->offset -= op->src.offset;
2686 
2687 	return 0;
2688 }
2689 
2690 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2691 {
2692 	if (arch_callee_saved_reg(reg) &&
2693 	    cfi->regs[reg].base == CFI_UNDEFINED) {
2694 		cfi->regs[reg].base = base;
2695 		cfi->regs[reg].offset = offset;
2696 	}
2697 }
2698 
2699 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2700 {
2701 	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2702 	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2703 }
2704 
2705 /*
2706  * A note about DRAP stack alignment:
2707  *
2708  * GCC has the concept of a DRAP register, which is used to help keep track of
2709  * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
2710  * register.  The typical DRAP pattern is:
2711  *
2712  *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
2713  *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
2714  *   41 ff 72 f8		pushq  -0x8(%r10)
2715  *   55				push   %rbp
2716  *   48 89 e5			mov    %rsp,%rbp
2717  *				(more pushes)
2718  *   41 52			push   %r10
2719  *				...
2720  *   41 5a			pop    %r10
2721  *				(more pops)
2722  *   5d				pop    %rbp
2723  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2724  *   c3				retq
2725  *
2726  * There are some variations in the epilogues, like:
2727  *
2728  *   5b				pop    %rbx
2729  *   41 5a			pop    %r10
2730  *   41 5c			pop    %r12
2731  *   41 5d			pop    %r13
2732  *   41 5e			pop    %r14
2733  *   c9				leaveq
2734  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2735  *   c3				retq
2736  *
2737  * and:
2738  *
2739  *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
2740  *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
2741  *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
2742  *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
2743  *   c9				leaveq
2744  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2745  *   c3				retq
2746  *
2747  * Sometimes r13 is used as the DRAP register, in which case it's saved and
2748  * restored beforehand:
2749  *
2750  *   41 55			push   %r13
2751  *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
2752  *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
2753  *				...
2754  *   49 8d 65 f0		lea    -0x10(%r13),%rsp
2755  *   41 5d			pop    %r13
2756  *   c3				retq
2757  */
/*
 * Apply the effect of one stack operation @op of @insn to @cfi: track the
 * CFA (base register + offset), the running stack size, and the saved
 * locations of callee-saved registers, including GCC's DRAP stack
 * alignment scheme (see the big comment above).  @next_insn is only
 * consulted to suppress a warning when the following instruction carries
 * an unwind hint.  Returns -1 on an operation the tracker can't model.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
			return -1;
		}
		return 0;
	}

	/* Hinted REGS regions use the simplified tracker. */
	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {

				/* lea disp(%rbp), %rsp */
				cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				break;
			}

			if (!cfi->drap && op->src.reg == CFI_SP &&
			    op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {

				/* lea disp(%rsp), %rbp */
				cfa->base = CFI_BP;
				cfa->offset -= op->src.offset;
				cfi->bp_scratch = false;
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				 /* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_FUNC("unsupported stack register modification",
					  insn->sec, insn->offset);
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_FUNC("unsupported stack pointer realignment",
					  insn->sec, insn->offset);
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_FUNC("unknown stack-related instruction",
				  insn->sec, insn->offset);
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_FUNC("unknown stack-related memory operation",
				  insn->sec, insn->offset);
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_FUNC("unknown stack-related instruction",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
3179 
3180 /*
3181  * The stack layouts of alternatives instructions can sometimes diverge when
3182  * they have stack modifications.  That's fine as long as the potential stack
3183  * layouts don't conflict at any given potential instruction boundary.
3184  *
3185  * Flatten the CFIs of the different alternative code streams (both original
3186  * and replacement) into a single shared CFI array which can be used to detect
3187  * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3188  */
3189 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3190 {
3191 	struct cfi_state **alt_cfi;
3192 	int group_off;
3193 
3194 	if (!insn->alt_group)
3195 		return 0;
3196 
3197 	if (!insn->cfi) {
3198 		WARN("CFI missing");
3199 		return -1;
3200 	}
3201 
3202 	alt_cfi = insn->alt_group->cfi;
3203 	group_off = insn->offset - insn->alt_group->first_insn->offset;
3204 
3205 	if (!alt_cfi[group_off]) {
3206 		alt_cfi[group_off] = insn->cfi;
3207 	} else {
3208 		if (cficmp(alt_cfi[group_off], insn->cfi)) {
3209 			WARN_FUNC("stack layout conflict in alternatives",
3210 				  insn->sec, insn->offset);
3211 			return -1;
3212 		}
3213 	}
3214 
3215 	return 0;
3216 }
3217 
/*
 * Apply all of @insn's stack operations to @state, and track PUSHF/POPF
 * pairing of the uaccess flag inside alternatives groups.  Returns 1 on a
 * validation failure, 0 on success.
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		/* The PUSHF/POPF uaccess tracking below only applies inside
		 * alternatives. */
		if (!insn->alt_group)
			continue;

		/*
		 * uaccess_stack is a shift-register stack of saved uaccess
		 * bits: a 1 is seeded as a bottom-of-stack sentinel, then
		 * each PUSHF shifts the current uaccess state in at bit 0.
		 */
		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				/* sentinel reached bit 31: no room to push */
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack  |= state->uaccess;
		}

		/* POPF restores the most recently pushed uaccess bit. */
		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}
3256 
/*
 * Compare @insn's recorded CFI state with @cfi2, warning about the first
 * difference found (CFA, then registers, then type, then DRAP state).
 * Returns true if the two states fully match.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* Report only the first mismatching register. */
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}
3306 
3307 static inline bool func_uaccess_safe(struct symbol *func)
3308 {
3309 	if (func)
3310 		return func->uaccess_safe;
3311 
3312 	return false;
3313 }
3314 
3315 static inline const char *call_dest_name(struct instruction *insn)
3316 {
3317 	static char pvname[19];
3318 	struct reloc *rel;
3319 	int idx;
3320 
3321 	if (insn->call_dest)
3322 		return insn->call_dest->name;
3323 
3324 	rel = insn_reloc(NULL, insn);
3325 	if (rel && !strcmp(rel->sym->name, "pv_ops")) {
3326 		idx = (rel->addend / sizeof(void *));
3327 		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3328 		return pvname;
3329 	}
3330 
3331 	return "{dynamic}";
3332 }
3333 
3334 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
3335 {
3336 	struct symbol *target;
3337 	struct reloc *rel;
3338 	int idx;
3339 
3340 	rel = insn_reloc(file, insn);
3341 	if (!rel || strcmp(rel->sym->name, "pv_ops"))
3342 		return false;
3343 
3344 	idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));
3345 
3346 	if (file->pv_ops[idx].clean)
3347 		return true;
3348 
3349 	file->pv_ops[idx].clean = true;
3350 
3351 	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
3352 		if (!target->sec->noinstr) {
3353 			WARN("pv_ops[%d]: %s", idx, target->name);
3354 			file->pv_ops[idx].clean = false;
3355 		}
3356 	}
3357 
3358 	return file->pv_ops[idx].clean;
3359 }
3360 
3361 static inline bool noinstr_call_dest(struct objtool_file *file,
3362 				     struct instruction *insn,
3363 				     struct symbol *func)
3364 {
3365 	/*
3366 	 * We can't deal with indirect function calls at present;
3367 	 * assume they're instrumented.
3368 	 */
3369 	if (!func) {
3370 		if (file->pv_ops)
3371 			return pv_call_dest(file, insn);
3372 
3373 		return false;
3374 	}
3375 
3376 	/*
3377 	 * If the symbol is from a noinstr section; we good.
3378 	 */
3379 	if (func->sec->noinstr)
3380 		return true;
3381 
3382 	/*
3383 	 * If the symbol is a static_call trampoline, we can't tell.
3384 	 */
3385 	if (func->static_call_tramp)
3386 		return true;
3387 
3388 	/*
3389 	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3390 	 * something 'BAD' happened. At the risk of taking the machine down,
3391 	 * let them proceed to get the message out.
3392 	 */
3393 	if (!strncmp(func->name, "__ubsan_handle_", 15))
3394 		return true;
3395 
3396 	return false;
3397 }
3398 
3399 static int validate_call(struct objtool_file *file,
3400 			 struct instruction *insn,
3401 			 struct insn_state *state)
3402 {
3403 	if (state->noinstr && state->instr <= 0 &&
3404 	    !noinstr_call_dest(file, insn, insn->call_dest)) {
3405 		WARN_FUNC("call to %s() leaves .noinstr.text section",
3406 				insn->sec, insn->offset, call_dest_name(insn));
3407 		return 1;
3408 	}
3409 
3410 	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
3411 		WARN_FUNC("call to %s() with UACCESS enabled",
3412 				insn->sec, insn->offset, call_dest_name(insn));
3413 		return 1;
3414 	}
3415 
3416 	if (state->df) {
3417 		WARN_FUNC("call to %s() with DF set",
3418 				insn->sec, insn->offset, call_dest_name(insn));
3419 		return 1;
3420 	}
3421 
3422 	return 0;
3423 }
3424 
/*
 * Validate a tail-call (sibling call) site: it must leave the stack frame
 * untouched, and it must satisfy all the regular call-site rules.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	/* A sibling call hands the caller's frame to the callee as-is. */
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
				insn->sec, insn->offset);
		return 1;
	}

	/* Everything else follows the ordinary call rules. */
	return validate_call(file, insn, state);
}
3437 
3438 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
3439 {
3440 	if (state->noinstr && state->instr > 0) {
3441 		WARN_FUNC("return with instrumentation enabled",
3442 			  insn->sec, insn->offset);
3443 		return 1;
3444 	}
3445 
3446 	if (state->uaccess && !func_uaccess_safe(func)) {
3447 		WARN_FUNC("return with UACCESS enabled",
3448 			  insn->sec, insn->offset);
3449 		return 1;
3450 	}
3451 
3452 	if (!state->uaccess && func_uaccess_safe(func)) {
3453 		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
3454 			  insn->sec, insn->offset);
3455 		return 1;
3456 	}
3457 
3458 	if (state->df) {
3459 		WARN_FUNC("return with DF set",
3460 			  insn->sec, insn->offset);
3461 		return 1;
3462 	}
3463 
3464 	if (func && has_modified_stack_frame(insn, state)) {
3465 		WARN_FUNC("return with modified stack frame",
3466 			  insn->sec, insn->offset);
3467 		return 1;
3468 	}
3469 
3470 	if (state->cfi.bp_scratch) {
3471 		WARN_FUNC("BP used as a scratch register",
3472 			  insn->sec, insn->offset);
3473 		return 1;
3474 	}
3475 
3476 	return 0;
3477 }
3478 
3479 static struct instruction *next_insn_to_validate(struct objtool_file *file,
3480 						 struct instruction *insn)
3481 {
3482 	struct alt_group *alt_group = insn->alt_group;
3483 
3484 	/*
3485 	 * Simulate the fact that alternatives are patched in-place.  When the
3486 	 * end of a replacement alt_group is reached, redirect objtool flow to
3487 	 * the end of the original alt_group.
3488 	 */
3489 	if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
3490 		return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3491 
3492 	return next_insn_same_sec(file, insn);
3493 }
3494 
3495 /*
3496  * Follow the branch starting at the given instruction, and recursively follow
3497  * any other branches (jumps).  Meanwhile, track the frame pointer state at
3498  * each instruction and validate all the rules described in
3499  * tools/objtool/Documentation/objtool.txt.
3500  */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		/* Execution fell out of @func into the next function? */
		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (!strncmp(func->name, "__cfi_", 6) ||
			    !strncmp(func->name, "__pfx_", 6))
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn_func(insn)->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		/*
		 * The uaccess flag is folded into the visited mask so that
		 * the same code is walked separately for the UACCESS-enabled
		 * and UACCESS-disabled cases.
		 */
		visited = VISITED_BRANCH << state.uaccess;
		if (insn->visited & VISITED_BRANCH_MASK) {
			/*
			 * Re-visiting an already-walked instruction: the CFI
			 * state reached via this path must match the one
			 * recorded by the earlier walk.
			 */
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			/* Already walked in this uaccess state: done. */
			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			/*
			 * UNWIND_HINT_RESTORE: re-use the CFI state saved at
			 * the closest preceding UNWIND_HINT_SAVE in this
			 * function.
			 */
			if (insn->restore) {
				struct instruction *save_insn, *i;

				i = insn;
				save_insn = NULL;

				sym_for_each_insn_continue_reverse(file, func, i) {
					if (i->save) {
						save_insn = i;
						break;
					}
				}

				if (!save_insn) {
					WARN_FUNC("no corresponding CFI save for CFI restore",
						  sec, insn->offset);
					return 1;
				}

				if (!save_insn->visited) {
					WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
						  sec, insn->offset);
					return 1;
				}

				insn->cfi = save_insn->cfi;
				nr_cfi_reused++;
			}

			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			/* Share the previous instruction's CFI when unchanged. */
			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		/* Recurse into every alternative replacement sequence. */
		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				/* Note: @state is passed by value (forked). */
				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			if (opts.stackval && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			/* A call to a noreturn function ends this path. */
			if (insn->dead_end)
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				/* Follow the taken branch with a forked state. */
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			/* Unconditional jump: no fall-through to validate. */
			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		/* Falling off the end of a section is only OK with no CFA. */
		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}
3742 
/*
 * Validate code reachable only via UNWIND_HINT annotations (i.e. not reached
 * by the normal function-entry walk).  With a NULL @sec, every instruction
 * in the file is scanned.  Returns the number of warnings produced.
 */
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int ret, warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(file, &state, sec);

	if (sec) {
		insn = find_insn(file, sec, 0);
		if (!insn)
			return 0;
	} else {
		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
	}

	while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
		/* Only start walks at hints nothing else has reached yet. */
		if (insn->hint && !insn->visited && !insn->ignore) {
			ret = validate_branch(file, insn_func(insn), insn, state);
			if (ret && opts.backtrace)
				BT_FUNC("<=== (hint)", insn);
			warnings += ret;
		}

		insn = list_next_entry(insn, list);
	}

	return warnings;
}
3775 
3776 /*
3777  * Validate rethunk entry constraint: must untrain RET before the first RET.
3778  *
3779  * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes
3780  * before an actual RET instruction.
3781  */
3782 static int validate_entry(struct objtool_file *file, struct instruction *insn)
3783 {
3784 	struct instruction *next, *dest;
3785 	int ret, warnings = 0;
3786 
3787 	for (;;) {
3788 		next = next_insn_to_validate(file, insn);
3789 
3790 		if (insn->visited & VISITED_ENTRY)
3791 			return 0;
3792 
3793 		insn->visited |= VISITED_ENTRY;
3794 
3795 		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
3796 			struct alternative *alt;
3797 			bool skip_orig = false;
3798 
3799 			list_for_each_entry(alt, &insn->alts, list) {
3800 				if (alt->skip_orig)
3801 					skip_orig = true;
3802 
3803 				ret = validate_entry(file, alt->insn);
3804 				if (ret) {
3805 				        if (opts.backtrace)
3806 						BT_FUNC("(alt)", insn);
3807 					return ret;
3808 				}
3809 			}
3810 
3811 			if (skip_orig)
3812 				return 0;
3813 		}
3814 
3815 		switch (insn->type) {
3816 
3817 		case INSN_CALL_DYNAMIC:
3818 		case INSN_JUMP_DYNAMIC:
3819 		case INSN_JUMP_DYNAMIC_CONDITIONAL:
3820 			WARN_FUNC("early indirect call", insn->sec, insn->offset);
3821 			return 1;
3822 
3823 		case INSN_JUMP_UNCONDITIONAL:
3824 		case INSN_JUMP_CONDITIONAL:
3825 			if (!is_sibling_call(insn)) {
3826 				if (!insn->jump_dest) {
3827 					WARN_FUNC("unresolved jump target after linking?!?",
3828 						  insn->sec, insn->offset);
3829 					return -1;
3830 				}
3831 				ret = validate_entry(file, insn->jump_dest);
3832 				if (ret) {
3833 					if (opts.backtrace) {
3834 						BT_FUNC("(branch%s)", insn,
3835 							insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
3836 					}
3837 					return ret;
3838 				}
3839 
3840 				if (insn->type == INSN_JUMP_UNCONDITIONAL)
3841 					return 0;
3842 
3843 				break;
3844 			}
3845 
3846 			/* fallthrough */
3847 		case INSN_CALL:
3848 			dest = find_insn(file, insn->call_dest->sec,
3849 					 insn->call_dest->offset);
3850 			if (!dest) {
3851 				WARN("Unresolved function after linking!?: %s",
3852 				     insn->call_dest->name);
3853 				return -1;
3854 			}
3855 
3856 			ret = validate_entry(file, dest);
3857 			if (ret) {
3858 				if (opts.backtrace)
3859 					BT_FUNC("(call)", insn);
3860 				return ret;
3861 			}
3862 			/*
3863 			 * If a call returns without error, it must have seen UNTRAIN_RET.
3864 			 * Therefore any non-error return is a success.
3865 			 */
3866 			return 0;
3867 
3868 		case INSN_RETURN:
3869 			WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset);
3870 			return 1;
3871 
3872 		case INSN_NOP:
3873 			if (insn->retpoline_safe)
3874 				return 0;
3875 			break;
3876 
3877 		default:
3878 			break;
3879 		}
3880 
3881 		if (!next) {
3882 			WARN_FUNC("teh end!", insn->sec, insn->offset);
3883 			return -1;
3884 		}
3885 		insn = next;
3886 	}
3887 
3888 	return warnings;
3889 }
3890 
3891 /*
3892  * Validate that all branches starting at 'insn->entry' encounter UNRET_END
3893  * before RET.
3894  */
3895 static int validate_unret(struct objtool_file *file)
3896 {
3897 	struct instruction *insn;
3898 	int ret, warnings = 0;
3899 
3900 	for_each_insn(file, insn) {
3901 		if (!insn->entry)
3902 			continue;
3903 
3904 		ret = validate_entry(file, insn);
3905 		if (ret < 0) {
3906 			WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
3907 			return ret;
3908 		}
3909 		warnings += ret;
3910 	}
3911 
3912 	return warnings;
3913 }
3914 
3915 static int validate_retpoline(struct objtool_file *file)
3916 {
3917 	struct instruction *insn;
3918 	int warnings = 0;
3919 
3920 	for_each_insn(file, insn) {
3921 		if (insn->type != INSN_JUMP_DYNAMIC &&
3922 		    insn->type != INSN_CALL_DYNAMIC &&
3923 		    insn->type != INSN_RETURN)
3924 			continue;
3925 
3926 		if (insn->retpoline_safe)
3927 			continue;
3928 
3929 		if (insn->sec->init)
3930 			continue;
3931 
3932 		if (insn->type == INSN_RETURN) {
3933 			if (opts.rethunk) {
3934 				WARN_FUNC("'naked' return found in RETHUNK build",
3935 					  insn->sec, insn->offset);
3936 			} else
3937 				continue;
3938 		} else {
3939 			WARN_FUNC("indirect %s found in RETPOLINE build",
3940 				  insn->sec, insn->offset,
3941 				  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
3942 		}
3943 
3944 		warnings++;
3945 	}
3946 
3947 	return warnings;
3948 }
3949 
3950 static bool is_kasan_insn(struct instruction *insn)
3951 {
3952 	return (insn->type == INSN_CALL &&
3953 		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
3954 }
3955 
3956 static bool is_ubsan_insn(struct instruction *insn)
3957 {
3958 	return (insn->type == INSN_CALL &&
3959 		!strcmp(insn->call_dest->name,
3960 			"__ubsan_handle_builtin_unreachable"));
3961 }
3962 
/*
 * Decide whether an instruction that was never reached by validate_branch()
 * should be excused from the "unreachable instruction" warning.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	/* Padding and deliberately-ignored code is never reported. */
	if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
		return true;

	/*
	 * Ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 *
	 * In this case we'll find a piece of code (whole function) that is not
	 * covered by a !section symbol. Ignore them.
	 */
	if (opts.link && !insn_func(insn)) {
		int size = find_symbol_hole_containing(insn->sec, insn->offset);
		/*
		 * NOTE(review): 'end' is computed before the size < 0 check
		 * below, but the negative-size case returns before 'end' is
		 * ever read, so the bogus value is harmless.
		 */
		unsigned long end = insn->offset + size;

		if (!size) /* not a hole */
			return false;

		if (size < 0) /* hole until the end */
			return true;

		sec_for_each_insn_continue(file, insn) {
			/*
			 * If we reach a visited instruction at or before the
			 * end of the hole, ignore the unreachable.
			 */
			if (insn->visited)
				return true;

			if (insn->offset >= end)
				break;

			/*
			 * If this hole jumps to a .cold function, mark it ignore too.
			 */
			if (insn->jump_dest && insn_func(insn->jump_dest) &&
			    strstr(insn_func(insn->jump_dest)->name, ".cold")) {
				struct instruction *dest = insn->jump_dest;
				func_for_each_insn(file, insn_func(dest), dest)
					dest->ignore = true;
			}
		}

		return false;
	}

	/* Unreachable code outside any function is always reported. */
	if (!insn_func(insn))
		return false;

	/* Static-call trampolines are never reached directly. */
	if (insn_func(insn)->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Follow intra-function jumps only. */
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == insn_func(insn)) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Stop at the end of the containing function. */
		if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}
4072 
/*
 * If @func is preceded by exactly opts.prefix bytes of NOP padding, create
 * a "__pfx_" prefix symbol covering that padding (see the x86 call-depth
 * tracking machinery).  Scans backwards from @insn (the first instruction
 * of @func) over NOPs only.
 */
static int add_prefix_symbol(struct objtool_file *file, struct symbol *func,
			     struct instruction *insn)
{
	if (!opts.prefix)
		return 0;

	for (;;) {
		struct instruction *prev = list_prev_entry(insn, list);
		u64 offset;

		/* Ran off the start of the instruction list. */
		if (&prev->list == &file->insn_list)
			break;

		/* Only NOP padding may precede the prefix symbol. */
		if (prev->type != INSN_NOP)
			break;

		offset = func->offset - prev->offset;
		if (offset >= opts.prefix) {
			if (offset == opts.prefix) {
				/*
				 * Since the sec->symbol_list is ordered by
				 * offset (see elf_add_symbol()) the added
				 * symbol will not be seen by the iteration in
				 * validate_section().
				 *
				 * Hence the lack of list_for_each_entry_safe()
				 * there.
				 *
				 * The direct consequence is that prefix symbols
				 * don't get visited (because pointless), except
				 * for the logic in ignore_unreachable_insn()
				 * that needs the terminating insn to be visited
				 * otherwise it will report the hole.
				 *
				 * Hence mark the first instruction of the
				 * prefix symbol as visited.
				 */
				prev->visited |= VISITED_BRANCH;
				elf_create_prefix_symbol(file->elf, func, opts.prefix);
			}
			break;
		}
		insn = prev;
	}

	return 0;
}
4120 
4121 static int validate_symbol(struct objtool_file *file, struct section *sec,
4122 			   struct symbol *sym, struct insn_state *state)
4123 {
4124 	struct instruction *insn;
4125 	int ret;
4126 
4127 	if (!sym->len) {
4128 		WARN("%s() is missing an ELF size annotation", sym->name);
4129 		return 1;
4130 	}
4131 
4132 	if (sym->pfunc != sym || sym->alias != sym)
4133 		return 0;
4134 
4135 	insn = find_insn(file, sec, sym->offset);
4136 	if (!insn || insn->ignore || insn->visited)
4137 		return 0;
4138 
4139 	add_prefix_symbol(file, sym, insn);
4140 
4141 	state->uaccess = sym->uaccess_safe;
4142 
4143 	ret = validate_branch(file, insn_func(insn), insn, *state);
4144 	if (ret && opts.backtrace)
4145 		BT_FUNC("<=== (sym)", insn);
4146 	return ret;
4147 }
4148 
4149 static int validate_section(struct objtool_file *file, struct section *sec)
4150 {
4151 	struct insn_state state;
4152 	struct symbol *func;
4153 	int warnings = 0;
4154 
4155 	list_for_each_entry(func, &sec->symbol_list, list) {
4156 		if (func->type != STT_FUNC)
4157 			continue;
4158 
4159 		init_insn_state(file, &state, sec);
4160 		set_func_state(&state.cfi);
4161 
4162 		warnings += validate_symbol(file, sec, func, &state);
4163 	}
4164 
4165 	return warnings;
4166 }
4167 
4168 static int validate_noinstr_sections(struct objtool_file *file)
4169 {
4170 	struct section *sec;
4171 	int warnings = 0;
4172 
4173 	sec = find_section_by_name(file->elf, ".noinstr.text");
4174 	if (sec) {
4175 		warnings += validate_section(file, sec);
4176 		warnings += validate_unwind_hints(file, sec);
4177 	}
4178 
4179 	sec = find_section_by_name(file->elf, ".entry.text");
4180 	if (sec) {
4181 		warnings += validate_section(file, sec);
4182 		warnings += validate_unwind_hints(file, sec);
4183 	}
4184 
4185 	sec = find_section_by_name(file->elf, ".cpuidle.text");
4186 	if (sec) {
4187 		warnings += validate_section(file, sec);
4188 		warnings += validate_unwind_hints(file, sec);
4189 	}
4190 
4191 	return warnings;
4192 }
4193 
4194 static int validate_functions(struct objtool_file *file)
4195 {
4196 	struct section *sec;
4197 	int warnings = 0;
4198 
4199 	for_each_sec(file, sec) {
4200 		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4201 			continue;
4202 
4203 		warnings += validate_section(file, sec);
4204 	}
4205 
4206 	return warnings;
4207 }
4208 
4209 static void mark_endbr_used(struct instruction *insn)
4210 {
4211 	if (!list_empty(&insn->call_node))
4212 		list_del_init(&insn->call_node);
4213 }
4214 
4215 static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4216 {
4217 	struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4218 	struct instruction *first;
4219 
4220 	if (!sym)
4221 		return false;
4222 
4223 	first = find_insn(file, sym->sec, sym->offset);
4224 	if (!first)
4225 		return false;
4226 
4227 	if (first->type != INSN_ENDBR && !first->noendbr)
4228 		return false;
4229 
4230 	return insn->offset == sym->offset + sym->len;
4231 }
4232 
/*
 * IBT: check every relocation inside @insn that takes the address of code.
 * Each such target must either be an ENDBR (which is then marked used so it
 * survives sealing), be annotated noendbr, or fall under one of the accepted
 * exception patterns below.  Returns the number of warnings emitted.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {
	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;
	default:
		break;
	}

	/* Walk every relocation whose site lies within this instruction. */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc->offset + 1,
					      (insn->offset + insn->len) - (reloc->offset + 1))) {

		/*
		 * static_call_update() references the trampoline, which
		 * doesn't have (or need) ENDBR.  Skip warning in that case.
		 */
		if (reloc->sym->static_call_tramp)
			continue;

		/* PC-relative relocs need the arch addend adjustment. */
		off = reloc->sym->offset;
		if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
			off += arch_dest_reloc_offset(reloc->addend);
		else
			off += reloc->addend;

		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		if (dest->type == INSN_ENDBR) {
			mark_endbr_used(dest);
			continue;
		}

		if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
			/*
			 * Anything from->to self is either _THIS_IP_ or
			 * IRET-to-self.
			 *
			 * There is no sane way to annotate _THIS_IP_ since the
			 * compiler treats the relocation as a constant and is
			 * happy to fold in offsets, skewing any annotation we
			 * do, leading to vast amounts of false-positives.
			 *
			 * There's also compiler generated _THIS_IP_ through
			 * KCOV and such which we have no hope of annotating.
			 *
			 * As such, blanket accept self-references without
			 * issue.
			 */
			continue;
		}

		/*
		 * Accept anything ANNOTATE_NOENDBR.
		 */
		if (dest->noendbr)
			continue;

		/*
		 * Accept if this is the instruction after a symbol
		 * that is (no)endbr -- typical code-range usage.
		 */
		if (noendbr_range(file, dest))
			continue;

		WARN_FUNC("relocation to !ENDBR: %s",
			  insn->sec, insn->offset,
			  offstr(dest->sec, dest->offset));

		warnings++;
	}

	return warnings;
}
4327 
4328 static int validate_ibt_data_reloc(struct objtool_file *file,
4329 				   struct reloc *reloc)
4330 {
4331 	struct instruction *dest;
4332 
4333 	dest = find_insn(file, reloc->sym->sec,
4334 			 reloc->sym->offset + reloc->addend);
4335 	if (!dest)
4336 		return 0;
4337 
4338 	if (dest->type == INSN_ENDBR) {
4339 		mark_endbr_used(dest);
4340 		return 0;
4341 	}
4342 
4343 	if (dest->noendbr)
4344 		return 0;
4345 
4346 	WARN_FUNC("data relocation to !ENDBR: %s",
4347 		  reloc->sec->base, reloc->offset,
4348 		  offstr(dest->sec, dest->offset));
4349 
4350 	return 1;
4351 }
4352 
4353 /*
4354  * Validate IBT rules and remove used ENDBR instructions from the seal list.
4355  * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4356  * NOPs) later, in create_ibt_endbr_seal_sections().
4357  */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	/* First pass: code-to-code references. */
	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	/* Second pass: data-to-code references. */
	for_each_sec(file, sec) {

		/* Already done by validate_ibt_insn() */
		if (sec->sh.sh_flags & SHF_EXECINSTR)
			continue;

		if (!sec->reloc)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal"))	||
		    !strncmp(sec->name, ".debug", 6)			||
		    !strcmp(sec->name, ".altinstructions")		||
		    !strcmp(sec->name, ".ibt_endbr_seal")		||
		    !strcmp(sec->name, ".orc_unwind_ip")		||
		    !strcmp(sec->name, ".parainstructions")		||
		    !strcmp(sec->name, ".retpoline_sites")		||
		    !strcmp(sec->name, ".smp_locks")			||
		    !strcmp(sec->name, ".static_call_sites")		||
		    !strcmp(sec->name, "_error_injection_whitelist")	||
		    !strcmp(sec->name, "_kprobe_blacklist")		||
		    !strcmp(sec->name, "__bug_table")			||
		    !strcmp(sec->name, "__ex_table")			||
		    !strcmp(sec->name, "__jump_table")			||
		    !strcmp(sec->name, "__mcount_loc")			||
		    !strcmp(sec->name, ".kcfi_traps")			||
		    strstr(sec->name, "__patchable_function_entries"))
			continue;

		list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}
4407 
4408 static int validate_sls(struct objtool_file *file)
4409 {
4410 	struct instruction *insn, *next_insn;
4411 	int warnings = 0;
4412 
4413 	for_each_insn(file, insn) {
4414 		next_insn = next_insn_same_sec(file, insn);
4415 
4416 		if (insn->retpoline_safe)
4417 			continue;
4418 
4419 		switch (insn->type) {
4420 		case INSN_RETURN:
4421 			if (!next_insn || next_insn->type != INSN_TRAP) {
4422 				WARN_FUNC("missing int3 after ret",
4423 					  insn->sec, insn->offset);
4424 				warnings++;
4425 			}
4426 
4427 			break;
4428 		case INSN_JUMP_DYNAMIC:
4429 			if (!next_insn || next_insn->type != INSN_TRAP) {
4430 				WARN_FUNC("missing int3 after indirect jump",
4431 					  insn->sec, insn->offset);
4432 				warnings++;
4433 			}
4434 			break;
4435 		default:
4436 			break;
4437 		}
4438 	}
4439 
4440 	return warnings;
4441 }
4442 
4443 static int validate_reachable_instructions(struct objtool_file *file)
4444 {
4445 	struct instruction *insn;
4446 
4447 	if (file->ignore_unreachables)
4448 		return 0;
4449 
4450 	for_each_insn(file, insn) {
4451 		if (insn->visited || ignore_unreachable_insn(file, insn))
4452 			continue;
4453 
4454 		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
4455 		return 1;
4456 	}
4457 
4458 	return 0;
4459 }
4460 
/*
 * Main objtool entry point: decode the object file, run the validation
 * passes selected by the command-line options, then generate the
 * metadata sections (static calls, retpoline/return sites, ORC, etc.).
 *
 * Warning counts from each pass are accumulated but, per the comment at
 * the 'out' label, do not currently cause a non-zero exit status.
 */
int check(struct objtool_file *file)
{
	int ret, warnings = 0;

	/* Seed the shared CFI state used by per-function validation. */
	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);

	/* Size the CFI hash relative to the number of symbols. */
	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
		goto out;

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = decode_sections(file);
	if (ret < 0)
		goto out;

	warnings += ret;

	/* Nothing to validate or annotate in an insn-less object. */
	if (list_empty(&file->insn_list))
		goto out;

	if (opts.retpoline) {
		ret = validate_retpoline(file);
		if (ret < 0)
			/*
			 * NOTE(review): propagates the error instead of
			 * 'goto out' (which returns 0) like the other
			 * passes -- confirm this is intentional.
			 */
			return ret;
		warnings += ret;
	}

	if (opts.stackval || opts.orc || opts.uaccess) {
		ret = validate_functions(file);
		if (ret < 0)
			goto out;
		warnings += ret;

		ret = validate_unwind_hints(file, NULL);
		if (ret < 0)
			goto out;
		warnings += ret;

		/*
		 * Reachability is only meaningful if validation completed
		 * cleanly; earlier warnings imply incomplete visit coverage.
		 */
		if (!warnings) {
			ret = validate_reachable_instructions(file);
			if (ret < 0)
				goto out;
			warnings += ret;
		}

	} else if (opts.noinstr) {
		ret = validate_noinstr_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		ret = validate_unret(file);
		if (ret < 0)
			/* NOTE(review): see the validate_retpoline() note above. */
			return ret;
		warnings += ret;
	}

	if (opts.ibt) {
		ret = validate_ibt(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.sls) {
		ret = validate_sls(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	/* Validation done; everything below generates annotation sections. */
	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret < 0)
				goto out;
			warnings += ret;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	/* insn_list may have been emptied by the passes above; re-check. */
	if (opts.orc && !list_empty(&file->insn_list)) {
		ret = orc_create(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}


	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	/*
	 *  For now, don't fail the kernel build on fatal warnings.  These
	 *  errors are still fairly common due to the growing matrix of
	 *  supported toolchains and their recent pace of change.
	 */
	return 0;
}
4614