xref: /openbmc/linux/tools/objtool/check.c (revision 2d311f48)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4  */
5 
6 #include <string.h>
7 #include <stdlib.h>
8 #include <inttypes.h>
9 #include <sys/mman.h>
10 
11 #include <arch/elf.h>
12 #include <objtool/builtin.h>
13 #include <objtool/cfi.h>
14 #include <objtool/arch.h>
15 #include <objtool/check.h>
16 #include <objtool/special.h>
17 #include <objtool/warn.h>
18 #include <objtool/endianness.h>
19 
20 #include <linux/objtool.h>
21 #include <linux/hashtable.h>
22 #include <linux/kernel.h>
23 #include <linux/static_call_types.h>
24 
/*
 * One alternative instruction sequence attached to an original instruction
 * (linked off that instruction's 'alts' list).
 */
struct alternative {
	struct list_head list;		/* node in the owning instruction's alts list */
	struct instruction *insn;	/* first instruction of the alternative sequence */
	bool skip_orig;			/* presumably: don't validate the original path — confirm at use site */
};
30 
/* cfi_state allocation / dedup statistics (bumped in cfi_alloc() and
 * cfi_hash_find_or_add()). */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

/* Shared CFI template states; initial_func_cfi presumably holds the arch's
 * function-entry CFI — confirm against the arch decoder. */
static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
36 
37 struct instruction *find_insn(struct objtool_file *file,
38 			      struct section *sec, unsigned long offset)
39 {
40 	struct instruction *insn;
41 
42 	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
43 		if (insn->sec == sec && insn->offset == offset)
44 			return insn;
45 	}
46 
47 	return NULL;
48 }
49 
50 static struct instruction *next_insn_same_sec(struct objtool_file *file,
51 					      struct instruction *insn)
52 {
53 	struct instruction *next = list_next_entry(insn, list);
54 
55 	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
56 		return NULL;
57 
58 	return next;
59 }
60 
61 static struct instruction *next_insn_same_func(struct objtool_file *file,
62 					       struct instruction *insn)
63 {
64 	struct instruction *next = list_next_entry(insn, list);
65 	struct symbol *func = insn_func(insn);
66 
67 	if (!func)
68 		return NULL;
69 
70 	if (&next->list != &file->insn_list && insn_func(next) == func)
71 		return next;
72 
73 	/* Check if we're already in the subfunction: */
74 	if (func == func->cfunc)
75 		return NULL;
76 
77 	/* Move to the subfunction: */
78 	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
79 }
80 
81 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
82 					       struct instruction *insn)
83 {
84 	struct instruction *prev = list_prev_entry(insn, list);
85 
86 	if (&prev->list != &file->insn_list && insn_func(prev) == insn_func(insn))
87 		return prev;
88 
89 	return NULL;
90 }
91 
/* Iterate all instructions of @func, following into its subfunction
 * (func->cfunc) via next_insn_same_func(). */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate the instructions covered by @sym's [offset, offset+len) range,
 * staying in @sym's section. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
		insn->sec == sym->sec &&				\
		insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

/* Iterate backwards starting at the instruction before @insn, while still
 * at or above @sym's start offset. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
		insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

/* Iterate from @insn (inclusive) to the end of its section. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Iterate from the instruction after @insn to the end of its section. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
116 
117 static bool is_jump_table_jump(struct instruction *insn)
118 {
119 	struct alt_group *alt_group = insn->alt_group;
120 
121 	if (insn->jump_table)
122 		return true;
123 
124 	/* Retpoline alternative for a jump table? */
125 	return alt_group && alt_group->orig_group &&
126 	       alt_group->orig_group->first_insn->jump_table;
127 }
128 
129 static bool is_sibling_call(struct instruction *insn)
130 {
131 	/*
132 	 * Assume only STT_FUNC calls have jump-tables.
133 	 */
134 	if (insn_func(insn)) {
135 		/* An indirect jump is either a sibling call or a jump to a table. */
136 		if (insn->type == INSN_JUMP_DYNAMIC)
137 			return !is_jump_table_jump(insn);
138 	}
139 
140 	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
141 	return (is_static_jump(insn) && insn->call_dest);
142 }
143 
144 /*
145  * This checks to see if the given function is a "noreturn" function.
146  *
147  * For global functions which are outside the scope of this object file, we
148  * have to keep a manual list of them.
149  *
150  * For local functions, we have to detect them manually by simply looking for
151  * the lack of a return instruction.
152  */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data. Keep 'em sorted.
	 */
	static const char * const global_noreturns[] = {
		"__invalid_creds",
		"__module_put_and_kthread_exit",
		"__reiserfs_panic",
		"__stack_chk_fail",
		"__ubsan_handle_builtin_unreachable",
		"cpu_bringup_and_idle",
		"cpu_startup_entry",
		"do_exit",
		"do_group_exit",
		"do_task_dead",
		"ex_handler_msr_mce",
		"fortify_panic",
		"kthread_complete_and_exit",
		"kthread_exit",
		"kunit_try_catch_throw",
		"lbug_with_loc",
		"machine_real_restart",
		"make_task_dead",
		"panic",
		"rewind_stack_and_make_dead",
		"sev_es_terminate",
		"snp_abort",
		"stop_this_cpu",
		"usercopy_abort",
		"xen_cpu_bringup_again",
		"xen_start_kernel",
	};

	if (!func)
		return false;

	/* Never trust a weak symbol — presumably because it can be
	 * overridden by a returning definition; confirm intent. */
	if (func->bind == STB_WEAK)
		return false;

	/* Global noreturn functions are matched against the table above. */
	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	/* Any return instruction means the function can return. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}
250 
/* Does @func never return?  Entry point for __dead_end_function(). */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
255 
256 static void init_cfi_state(struct cfi_state *cfi)
257 {
258 	int i;
259 
260 	for (i = 0; i < CFI_NUM_REGS; i++) {
261 		cfi->regs[i].base = CFI_UNDEFINED;
262 		cfi->vals[i].base = CFI_UNDEFINED;
263 	}
264 	cfi->cfa.base = CFI_UNDEFINED;
265 	cfi->drap_reg = CFI_UNDEFINED;
266 	cfi->drap_offset = -1;
267 }
268 
269 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
270 			    struct section *sec)
271 {
272 	memset(state, 0, sizeof(*state));
273 	init_cfi_state(&state->cfi);
274 
275 	/*
276 	 * We need the full vmlinux for noinstr validation, otherwise we can
277 	 * not correctly determine insn->call_dest->sec (external symbols do
278 	 * not have a section).
279 	 */
280 	if (opts.link && opts.noinstr && sec)
281 		state->noinstr = sec->noinstr;
282 }
283 
284 static struct cfi_state *cfi_alloc(void)
285 {
286 	struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
287 	if (!cfi) {
288 		WARN("calloc failed");
289 		exit(1);
290 	}
291 	nr_cfi++;
292 	return cfi;
293 }
294 
/* Hash table interning identical cfi_state objects (see cfi_hash_alloc(),
 * cfi_hash_find_or_add()); cfi_bits is its log2 bucket count. */
static int cfi_bits;
static struct hlist_head *cfi_hash;
297 
298 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
299 {
300 	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
301 		      (void *)cfi2 + sizeof(cfi2->hash),
302 		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
303 }
304 
305 static inline u32 cfi_key(struct cfi_state *cfi)
306 {
307 	return jhash((void *)cfi + sizeof(cfi->hash),
308 		     sizeof(*cfi) - sizeof(cfi->hash), 0);
309 }
310 
311 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
312 {
313 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
314 	struct cfi_state *obj;
315 
316 	hlist_for_each_entry(obj, head, hash) {
317 		if (!cficmp(cfi, obj)) {
318 			nr_cfi_cache++;
319 			return obj;
320 		}
321 	}
322 
323 	obj = cfi_alloc();
324 	*obj = *cfi;
325 	hlist_add_head(&obj->hash, head);
326 
327 	return obj;
328 }
329 
330 static void cfi_hash_add(struct cfi_state *cfi)
331 {
332 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
333 
334 	hlist_add_head(&cfi->hash, head);
335 }
336 
337 static void *cfi_hash_alloc(unsigned long size)
338 {
339 	cfi_bits = max(10, ilog2(size));
340 	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
341 			PROT_READ|PROT_WRITE,
342 			MAP_PRIVATE|MAP_ANON, -1, 0);
343 	if (cfi_hash == (void *)-1L) {
344 		WARN("mmap fail cfi_hash");
345 		cfi_hash = NULL;
346 	}  else if (opts.stats) {
347 		printf("cfi_bits: %d\n", cfi_bits);
348 	}
349 
350 	return cfi_hash;
351 }
352 
/* Decoded-instruction counters; nr_insns is reported under opts.stats. */
static unsigned long nr_insns;
static unsigned long nr_insns_visited;
355 
356 /*
357  * Call the arch-specific instruction decoder for all the instructions and add
358  * them to the global instruction list.
359  */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		/* Everything executable except alternative-replacement and
		 * .discard.* sections counts as real text. */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		/* Sections subject to noinstr validation. */
		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text.__x86.", 12))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		/* Decode every instruction in the section, linking each one
		 * into the global list and the (sec, offset) hash. */
		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			insn = malloc(sizeof(*insn));
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
			INIT_LIST_HEAD(&insn->stack_ops);
			INIT_LIST_HEAD(&insn->call_node);

			insn->sec = sec;
			insn->offset = offset;

			/* The arch decoder fills in length, type, immediate
			 * and any stack operations. */
			ret = arch_decode_instruction(file, sec, offset,
						      sec->sh.sh_size - offset,
						      &insn->len, &insn->type,
						      &insn->immediate,
						      &insn->stack_ops);
			if (ret)
				goto err;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			list_add_tail(&insn->list, &file->insn_list);
			nr_insns++;
		}

		/* Associate instructions with the symbols covering them. */
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_NOTYPE && func->type != STT_FUNC)
				continue;

			if (func->offset == sec->sh.sh_size) {
				/* Heuristic: likely an "end" symbol */
				if (func->type == STT_NOTYPE)
					continue;
				WARN("%s(): STT_FUNC at end of section",
				     func->name);
				return -1;
			}

			/* Skip return thunks and non-canonical aliases. */
			if (func->return_thunk || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			/* Record ENDBR instructions: those at a function's
			 * entry go on endbr_list (consumed by
			 * create_ibt_endbr_seal_sections()); interior ones
			 * are only counted. */
			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				if (func->type == STT_FUNC &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;

err:
	/* Only the not-yet-linked instruction is freed here. */
	free(insn);
	return ret;
}
475 
476 /*
477  * Read the pv_ops[] .data table to find the static initialized values.
478  */
479 static int add_pv_ops(struct objtool_file *file, const char *symname)
480 {
481 	struct symbol *sym, *func;
482 	unsigned long off, end;
483 	struct reloc *rel;
484 	int idx;
485 
486 	sym = find_symbol_by_name(file->elf, symname);
487 	if (!sym)
488 		return 0;
489 
490 	off = sym->offset;
491 	end = off + sym->len;
492 	for (;;) {
493 		rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
494 		if (!rel)
495 			break;
496 
497 		func = rel->sym;
498 		if (func->type == STT_SECTION)
499 			func = find_symbol_by_offset(rel->sym->sec, rel->addend);
500 
501 		idx = (rel->offset - sym->offset) / sizeof(unsigned long);
502 
503 		objtool_pv_add(file, idx, func);
504 
505 		off = rel->offset + 1;
506 		if (off > end)
507 			break;
508 	}
509 
510 	return 0;
511 }
512 
513 /*
514  * Allocate and initialize file->pv_ops[].
515  */
516 static int init_pv_ops(struct objtool_file *file)
517 {
518 	static const char *pv_ops_tables[] = {
519 		"pv_ops",
520 		"xen_cpu_ops",
521 		"xen_irq_ops",
522 		"xen_mmu_ops",
523 		NULL,
524 	};
525 	const char *pv_ops;
526 	struct symbol *sym;
527 	int idx, nr;
528 
529 	if (!opts.noinstr)
530 		return 0;
531 
532 	file->pv_ops = NULL;
533 
534 	sym = find_symbol_by_name(file->elf, "pv_ops");
535 	if (!sym)
536 		return 0;
537 
538 	nr = sym->len / sizeof(unsigned long);
539 	file->pv_ops = calloc(sizeof(struct pv_state), nr);
540 	if (!file->pv_ops)
541 		return -1;
542 
543 	for (idx = 0; idx < nr; idx++)
544 		INIT_LIST_HEAD(&file->pv_ops[idx].targets);
545 
546 	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
547 		add_pv_ops(file, pv_ops);
548 
549 	return 0;
550 }
551 
552 static struct instruction *find_last_insn(struct objtool_file *file,
553 					  struct section *sec)
554 {
555 	struct instruction *insn = NULL;
556 	unsigned int offset;
557 	unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
558 
559 	for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
560 		insn = find_insn(file, sec, offset);
561 
562 	return insn;
563 }
564 
565 /*
566  * Mark "ud2" instructions and manually annotated dead ends.
567  */
568 static int add_dead_ends(struct objtool_file *file)
569 {
570 	struct section *sec;
571 	struct reloc *reloc;
572 	struct instruction *insn;
573 
574 	/*
575 	 * Check for manually annotated dead ends.
576 	 */
577 	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
578 	if (!sec)
579 		goto reachable;
580 
581 	list_for_each_entry(reloc, &sec->reloc_list, list) {
582 		if (reloc->sym->type != STT_SECTION) {
583 			WARN("unexpected relocation symbol type in %s", sec->name);
584 			return -1;
585 		}
586 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
587 		if (insn)
588 			insn = list_prev_entry(insn, list);
589 		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
590 			insn = find_last_insn(file, reloc->sym->sec);
591 			if (!insn) {
592 				WARN("can't find unreachable insn at %s+0x%" PRIx64,
593 				     reloc->sym->sec->name, reloc->addend);
594 				return -1;
595 			}
596 		} else {
597 			WARN("can't find unreachable insn at %s+0x%" PRIx64,
598 			     reloc->sym->sec->name, reloc->addend);
599 			return -1;
600 		}
601 
602 		insn->dead_end = true;
603 	}
604 
605 reachable:
606 	/*
607 	 * These manually annotated reachable checks are needed for GCC 4.4,
608 	 * where the Linux unreachable() macro isn't supported.  In that case
609 	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
610 	 * not a dead end.
611 	 */
612 	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
613 	if (!sec)
614 		return 0;
615 
616 	list_for_each_entry(reloc, &sec->reloc_list, list) {
617 		if (reloc->sym->type != STT_SECTION) {
618 			WARN("unexpected relocation symbol type in %s", sec->name);
619 			return -1;
620 		}
621 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
622 		if (insn)
623 			insn = list_prev_entry(insn, list);
624 		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
625 			insn = find_last_insn(file, reloc->sym->sec);
626 			if (!insn) {
627 				WARN("can't find reachable insn at %s+0x%" PRIx64,
628 				     reloc->sym->sec->name, reloc->addend);
629 				return -1;
630 			}
631 		} else {
632 			WARN("can't find reachable insn at %s+0x%" PRIx64,
633 			     reloc->sym->sec->name, reloc->addend);
634 			return -1;
635 		}
636 
637 		insn->dead_end = false;
638 	}
639 
640 	return 0;
641 }
642 
643 static int create_static_call_sections(struct objtool_file *file)
644 {
645 	struct section *sec;
646 	struct static_call_site *site;
647 	struct instruction *insn;
648 	struct symbol *key_sym;
649 	char *key_name, *tmp;
650 	int idx;
651 
652 	sec = find_section_by_name(file->elf, ".static_call_sites");
653 	if (sec) {
654 		INIT_LIST_HEAD(&file->static_call_list);
655 		WARN("file already has .static_call_sites section, skipping");
656 		return 0;
657 	}
658 
659 	if (list_empty(&file->static_call_list))
660 		return 0;
661 
662 	idx = 0;
663 	list_for_each_entry(insn, &file->static_call_list, call_node)
664 		idx++;
665 
666 	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
667 				 sizeof(struct static_call_site), idx);
668 	if (!sec)
669 		return -1;
670 
671 	idx = 0;
672 	list_for_each_entry(insn, &file->static_call_list, call_node) {
673 
674 		site = (struct static_call_site *)sec->data->d_buf + idx;
675 		memset(site, 0, sizeof(struct static_call_site));
676 
677 		/* populate reloc for 'addr' */
678 		if (elf_add_reloc_to_insn(file->elf, sec,
679 					  idx * sizeof(struct static_call_site),
680 					  R_X86_64_PC32,
681 					  insn->sec, insn->offset))
682 			return -1;
683 
684 		/* find key symbol */
685 		key_name = strdup(insn->call_dest->name);
686 		if (!key_name) {
687 			perror("strdup");
688 			return -1;
689 		}
690 		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
691 			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
692 			WARN("static_call: trampoline name malformed: %s", key_name);
693 			return -1;
694 		}
695 		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
696 		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
697 
698 		key_sym = find_symbol_by_name(file->elf, tmp);
699 		if (!key_sym) {
700 			if (!opts.module) {
701 				WARN("static_call: can't find static_call_key symbol: %s", tmp);
702 				return -1;
703 			}
704 
705 			/*
706 			 * For modules(), the key might not be exported, which
707 			 * means the module can make static calls but isn't
708 			 * allowed to change them.
709 			 *
710 			 * In that case we temporarily set the key to be the
711 			 * trampoline address.  This is fixed up in
712 			 * static_call_add_module().
713 			 */
714 			key_sym = insn->call_dest;
715 		}
716 		free(key_name);
717 
718 		/* populate reloc for 'key' */
719 		if (elf_add_reloc(file->elf, sec,
720 				  idx * sizeof(struct static_call_site) + 4,
721 				  R_X86_64_PC32, key_sym,
722 				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
723 			return -1;
724 
725 		idx++;
726 	}
727 
728 	return 0;
729 }
730 
731 static int create_retpoline_sites_sections(struct objtool_file *file)
732 {
733 	struct instruction *insn;
734 	struct section *sec;
735 	int idx;
736 
737 	sec = find_section_by_name(file->elf, ".retpoline_sites");
738 	if (sec) {
739 		WARN("file already has .retpoline_sites, skipping");
740 		return 0;
741 	}
742 
743 	idx = 0;
744 	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
745 		idx++;
746 
747 	if (!idx)
748 		return 0;
749 
750 	sec = elf_create_section(file->elf, ".retpoline_sites", 0,
751 				 sizeof(int), idx);
752 	if (!sec) {
753 		WARN("elf_create_section: .retpoline_sites");
754 		return -1;
755 	}
756 
757 	idx = 0;
758 	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
759 
760 		int *site = (int *)sec->data->d_buf + idx;
761 		*site = 0;
762 
763 		if (elf_add_reloc_to_insn(file->elf, sec,
764 					  idx * sizeof(int),
765 					  R_X86_64_PC32,
766 					  insn->sec, insn->offset)) {
767 			WARN("elf_add_reloc_to_insn: .retpoline_sites");
768 			return -1;
769 		}
770 
771 		idx++;
772 	}
773 
774 	return 0;
775 }
776 
777 static int create_return_sites_sections(struct objtool_file *file)
778 {
779 	struct instruction *insn;
780 	struct section *sec;
781 	int idx;
782 
783 	sec = find_section_by_name(file->elf, ".return_sites");
784 	if (sec) {
785 		WARN("file already has .return_sites, skipping");
786 		return 0;
787 	}
788 
789 	idx = 0;
790 	list_for_each_entry(insn, &file->return_thunk_list, call_node)
791 		idx++;
792 
793 	if (!idx)
794 		return 0;
795 
796 	sec = elf_create_section(file->elf, ".return_sites", 0,
797 				 sizeof(int), idx);
798 	if (!sec) {
799 		WARN("elf_create_section: .return_sites");
800 		return -1;
801 	}
802 
803 	idx = 0;
804 	list_for_each_entry(insn, &file->return_thunk_list, call_node) {
805 
806 		int *site = (int *)sec->data->d_buf + idx;
807 		*site = 0;
808 
809 		if (elf_add_reloc_to_insn(file->elf, sec,
810 					  idx * sizeof(int),
811 					  R_X86_64_PC32,
812 					  insn->sec, insn->offset)) {
813 			WARN("elf_add_reloc_to_insn: .return_sites");
814 			return -1;
815 		}
816 
817 		idx++;
818 	}
819 
820 	return 0;
821 }
822 
823 static int create_ibt_endbr_seal_sections(struct objtool_file *file)
824 {
825 	struct instruction *insn;
826 	struct section *sec;
827 	int idx;
828 
829 	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
830 	if (sec) {
831 		WARN("file already has .ibt_endbr_seal, skipping");
832 		return 0;
833 	}
834 
835 	idx = 0;
836 	list_for_each_entry(insn, &file->endbr_list, call_node)
837 		idx++;
838 
839 	if (opts.stats) {
840 		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
841 		printf("ibt: ENDBR inside functions:  %d\n", file->nr_endbr_int);
842 		printf("ibt: superfluous ENDBR:       %d\n", idx);
843 	}
844 
845 	if (!idx)
846 		return 0;
847 
848 	sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
849 				 sizeof(int), idx);
850 	if (!sec) {
851 		WARN("elf_create_section: .ibt_endbr_seal");
852 		return -1;
853 	}
854 
855 	idx = 0;
856 	list_for_each_entry(insn, &file->endbr_list, call_node) {
857 
858 		int *site = (int *)sec->data->d_buf + idx;
859 		*site = 0;
860 
861 		if (elf_add_reloc_to_insn(file->elf, sec,
862 					  idx * sizeof(int),
863 					  R_X86_64_PC32,
864 					  insn->sec, insn->offset)) {
865 			WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
866 			return -1;
867 		}
868 
869 		idx++;
870 	}
871 
872 	return 0;
873 }
874 
875 static int create_cfi_sections(struct objtool_file *file)
876 {
877 	struct section *sec, *s;
878 	struct symbol *sym;
879 	unsigned int *loc;
880 	int idx;
881 
882 	sec = find_section_by_name(file->elf, ".cfi_sites");
883 	if (sec) {
884 		INIT_LIST_HEAD(&file->call_list);
885 		WARN("file already has .cfi_sites section, skipping");
886 		return 0;
887 	}
888 
889 	idx = 0;
890 	for_each_sec(file, s) {
891 		if (!s->text)
892 			continue;
893 
894 		list_for_each_entry(sym, &s->symbol_list, list) {
895 			if (sym->type != STT_FUNC)
896 				continue;
897 
898 			if (strncmp(sym->name, "__cfi_", 6))
899 				continue;
900 
901 			idx++;
902 		}
903 	}
904 
905 	sec = elf_create_section(file->elf, ".cfi_sites", 0, sizeof(unsigned int), idx);
906 	if (!sec)
907 		return -1;
908 
909 	idx = 0;
910 	for_each_sec(file, s) {
911 		if (!s->text)
912 			continue;
913 
914 		list_for_each_entry(sym, &s->symbol_list, list) {
915 			if (sym->type != STT_FUNC)
916 				continue;
917 
918 			if (strncmp(sym->name, "__cfi_", 6))
919 				continue;
920 
921 			loc = (unsigned int *)sec->data->d_buf + idx;
922 			memset(loc, 0, sizeof(unsigned int));
923 
924 			if (elf_add_reloc_to_insn(file->elf, sec,
925 						  idx * sizeof(unsigned int),
926 						  R_X86_64_PC32,
927 						  s, sym->offset))
928 				return -1;
929 
930 			idx++;
931 		}
932 	}
933 
934 	return 0;
935 }
936 
937 static int create_mcount_loc_sections(struct objtool_file *file)
938 {
939 	int addrsize = elf_class_addrsize(file->elf);
940 	struct instruction *insn;
941 	struct section *sec;
942 	int idx;
943 
944 	sec = find_section_by_name(file->elf, "__mcount_loc");
945 	if (sec) {
946 		INIT_LIST_HEAD(&file->mcount_loc_list);
947 		WARN("file already has __mcount_loc section, skipping");
948 		return 0;
949 	}
950 
951 	if (list_empty(&file->mcount_loc_list))
952 		return 0;
953 
954 	idx = 0;
955 	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
956 		idx++;
957 
958 	sec = elf_create_section(file->elf, "__mcount_loc", 0, addrsize, idx);
959 	if (!sec)
960 		return -1;
961 
962 	sec->sh.sh_addralign = addrsize;
963 
964 	idx = 0;
965 	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
966 		void *loc;
967 
968 		loc = sec->data->d_buf + idx;
969 		memset(loc, 0, addrsize);
970 
971 		if (elf_add_reloc_to_insn(file->elf, sec, idx,
972 					  addrsize == sizeof(u64) ? R_ABS64 : R_ABS32,
973 					  insn->sec, insn->offset))
974 			return -1;
975 
976 		idx += addrsize;
977 	}
978 
979 	return 0;
980 }
981 
982 static int create_direct_call_sections(struct objtool_file *file)
983 {
984 	struct instruction *insn;
985 	struct section *sec;
986 	unsigned int *loc;
987 	int idx;
988 
989 	sec = find_section_by_name(file->elf, ".call_sites");
990 	if (sec) {
991 		INIT_LIST_HEAD(&file->call_list);
992 		WARN("file already has .call_sites section, skipping");
993 		return 0;
994 	}
995 
996 	if (list_empty(&file->call_list))
997 		return 0;
998 
999 	idx = 0;
1000 	list_for_each_entry(insn, &file->call_list, call_node)
1001 		idx++;
1002 
1003 	sec = elf_create_section(file->elf, ".call_sites", 0, sizeof(unsigned int), idx);
1004 	if (!sec)
1005 		return -1;
1006 
1007 	idx = 0;
1008 	list_for_each_entry(insn, &file->call_list, call_node) {
1009 
1010 		loc = (unsigned int *)sec->data->d_buf + idx;
1011 		memset(loc, 0, sizeof(unsigned int));
1012 
1013 		if (elf_add_reloc_to_insn(file->elf, sec,
1014 					  idx * sizeof(unsigned int),
1015 					  R_X86_64_PC32,
1016 					  insn->sec, insn->offset))
1017 			return -1;
1018 
1019 		idx++;
1020 	}
1021 
1022 	return 0;
1023 }
1024 
1025 /*
1026  * Warnings shouldn't be reported for ignored functions.
1027  */
1028 static void add_ignores(struct objtool_file *file)
1029 {
1030 	struct instruction *insn;
1031 	struct section *sec;
1032 	struct symbol *func;
1033 	struct reloc *reloc;
1034 
1035 	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
1036 	if (!sec)
1037 		return;
1038 
1039 	list_for_each_entry(reloc, &sec->reloc_list, list) {
1040 		switch (reloc->sym->type) {
1041 		case STT_FUNC:
1042 			func = reloc->sym;
1043 			break;
1044 
1045 		case STT_SECTION:
1046 			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
1047 			if (!func)
1048 				continue;
1049 			break;
1050 
1051 		default:
1052 			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
1053 			continue;
1054 		}
1055 
1056 		func_for_each_insn(file, func, insn)
1057 			insn->ignore = true;
1058 	}
1059 }
1060 
1061 /*
1062  * This is a whitelist of functions that is allowed to be called with AC set.
1063  * The list is meant to be minimal and only contains compiler instrumentation
1064  * ABI and a few functions used to implement *_{to,from}_user() functions.
1065  *
1066  * These functions must not directly change AC, but may PUSHF/POPF.
1067  */
1068 static const char *uaccess_safe_builtin[] = {
1069 	/* KASAN */
1070 	"kasan_report",
1071 	"kasan_check_range",
1072 	/* KASAN out-of-line */
1073 	"__asan_loadN_noabort",
1074 	"__asan_load1_noabort",
1075 	"__asan_load2_noabort",
1076 	"__asan_load4_noabort",
1077 	"__asan_load8_noabort",
1078 	"__asan_load16_noabort",
1079 	"__asan_storeN_noabort",
1080 	"__asan_store1_noabort",
1081 	"__asan_store2_noabort",
1082 	"__asan_store4_noabort",
1083 	"__asan_store8_noabort",
1084 	"__asan_store16_noabort",
1085 	"__kasan_check_read",
1086 	"__kasan_check_write",
1087 	/* KASAN in-line */
1088 	"__asan_report_load_n_noabort",
1089 	"__asan_report_load1_noabort",
1090 	"__asan_report_load2_noabort",
1091 	"__asan_report_load4_noabort",
1092 	"__asan_report_load8_noabort",
1093 	"__asan_report_load16_noabort",
1094 	"__asan_report_store_n_noabort",
1095 	"__asan_report_store1_noabort",
1096 	"__asan_report_store2_noabort",
1097 	"__asan_report_store4_noabort",
1098 	"__asan_report_store8_noabort",
1099 	"__asan_report_store16_noabort",
1100 	/* KCSAN */
1101 	"__kcsan_check_access",
1102 	"__kcsan_mb",
1103 	"__kcsan_wmb",
1104 	"__kcsan_rmb",
1105 	"__kcsan_release",
1106 	"kcsan_found_watchpoint",
1107 	"kcsan_setup_watchpoint",
1108 	"kcsan_check_scoped_accesses",
1109 	"kcsan_disable_current",
1110 	"kcsan_enable_current_nowarn",
1111 	/* KCSAN/TSAN */
1112 	"__tsan_func_entry",
1113 	"__tsan_func_exit",
1114 	"__tsan_read_range",
1115 	"__tsan_write_range",
1116 	"__tsan_read1",
1117 	"__tsan_read2",
1118 	"__tsan_read4",
1119 	"__tsan_read8",
1120 	"__tsan_read16",
1121 	"__tsan_write1",
1122 	"__tsan_write2",
1123 	"__tsan_write4",
1124 	"__tsan_write8",
1125 	"__tsan_write16",
1126 	"__tsan_read_write1",
1127 	"__tsan_read_write2",
1128 	"__tsan_read_write4",
1129 	"__tsan_read_write8",
1130 	"__tsan_read_write16",
1131 	"__tsan_volatile_read1",
1132 	"__tsan_volatile_read2",
1133 	"__tsan_volatile_read4",
1134 	"__tsan_volatile_read8",
1135 	"__tsan_volatile_read16",
1136 	"__tsan_volatile_write1",
1137 	"__tsan_volatile_write2",
1138 	"__tsan_volatile_write4",
1139 	"__tsan_volatile_write8",
1140 	"__tsan_volatile_write16",
1141 	"__tsan_atomic8_load",
1142 	"__tsan_atomic16_load",
1143 	"__tsan_atomic32_load",
1144 	"__tsan_atomic64_load",
1145 	"__tsan_atomic8_store",
1146 	"__tsan_atomic16_store",
1147 	"__tsan_atomic32_store",
1148 	"__tsan_atomic64_store",
1149 	"__tsan_atomic8_exchange",
1150 	"__tsan_atomic16_exchange",
1151 	"__tsan_atomic32_exchange",
1152 	"__tsan_atomic64_exchange",
1153 	"__tsan_atomic8_fetch_add",
1154 	"__tsan_atomic16_fetch_add",
1155 	"__tsan_atomic32_fetch_add",
1156 	"__tsan_atomic64_fetch_add",
1157 	"__tsan_atomic8_fetch_sub",
1158 	"__tsan_atomic16_fetch_sub",
1159 	"__tsan_atomic32_fetch_sub",
1160 	"__tsan_atomic64_fetch_sub",
1161 	"__tsan_atomic8_fetch_and",
1162 	"__tsan_atomic16_fetch_and",
1163 	"__tsan_atomic32_fetch_and",
1164 	"__tsan_atomic64_fetch_and",
1165 	"__tsan_atomic8_fetch_or",
1166 	"__tsan_atomic16_fetch_or",
1167 	"__tsan_atomic32_fetch_or",
1168 	"__tsan_atomic64_fetch_or",
1169 	"__tsan_atomic8_fetch_xor",
1170 	"__tsan_atomic16_fetch_xor",
1171 	"__tsan_atomic32_fetch_xor",
1172 	"__tsan_atomic64_fetch_xor",
1173 	"__tsan_atomic8_fetch_nand",
1174 	"__tsan_atomic16_fetch_nand",
1175 	"__tsan_atomic32_fetch_nand",
1176 	"__tsan_atomic64_fetch_nand",
1177 	"__tsan_atomic8_compare_exchange_strong",
1178 	"__tsan_atomic16_compare_exchange_strong",
1179 	"__tsan_atomic32_compare_exchange_strong",
1180 	"__tsan_atomic64_compare_exchange_strong",
1181 	"__tsan_atomic8_compare_exchange_weak",
1182 	"__tsan_atomic16_compare_exchange_weak",
1183 	"__tsan_atomic32_compare_exchange_weak",
1184 	"__tsan_atomic64_compare_exchange_weak",
1185 	"__tsan_atomic8_compare_exchange_val",
1186 	"__tsan_atomic16_compare_exchange_val",
1187 	"__tsan_atomic32_compare_exchange_val",
1188 	"__tsan_atomic64_compare_exchange_val",
1189 	"__tsan_atomic_thread_fence",
1190 	"__tsan_atomic_signal_fence",
1191 	/* KCOV */
1192 	"write_comp_data",
1193 	"check_kcov_mode",
1194 	"__sanitizer_cov_trace_pc",
1195 	"__sanitizer_cov_trace_const_cmp1",
1196 	"__sanitizer_cov_trace_const_cmp2",
1197 	"__sanitizer_cov_trace_const_cmp4",
1198 	"__sanitizer_cov_trace_const_cmp8",
1199 	"__sanitizer_cov_trace_cmp1",
1200 	"__sanitizer_cov_trace_cmp2",
1201 	"__sanitizer_cov_trace_cmp4",
1202 	"__sanitizer_cov_trace_cmp8",
1203 	"__sanitizer_cov_trace_switch",
1204 	/* KMSAN */
1205 	"kmsan_copy_to_user",
1206 	"kmsan_report",
1207 	"kmsan_unpoison_entry_regs",
1208 	"kmsan_unpoison_memory",
1209 	"__msan_chain_origin",
1210 	"__msan_get_context_state",
1211 	"__msan_instrument_asm_store",
1212 	"__msan_metadata_ptr_for_load_1",
1213 	"__msan_metadata_ptr_for_load_2",
1214 	"__msan_metadata_ptr_for_load_4",
1215 	"__msan_metadata_ptr_for_load_8",
1216 	"__msan_metadata_ptr_for_load_n",
1217 	"__msan_metadata_ptr_for_store_1",
1218 	"__msan_metadata_ptr_for_store_2",
1219 	"__msan_metadata_ptr_for_store_4",
1220 	"__msan_metadata_ptr_for_store_8",
1221 	"__msan_metadata_ptr_for_store_n",
1222 	"__msan_poison_alloca",
1223 	"__msan_warning",
1224 	/* UBSAN */
1225 	"ubsan_type_mismatch_common",
1226 	"__ubsan_handle_type_mismatch",
1227 	"__ubsan_handle_type_mismatch_v1",
1228 	"__ubsan_handle_shift_out_of_bounds",
1229 	"__ubsan_handle_load_invalid_value",
1230 	/* misc */
1231 	"csum_partial_copy_generic",
1232 	"copy_mc_fragile",
1233 	"copy_mc_fragile_handle_tail",
1234 	"copy_mc_enhanced_fast_string",
1235 	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
1236 	"clear_user_erms",
1237 	"clear_user_rep_good",
1238 	"clear_user_original",
1239 	NULL
1240 };
1241 
1242 static void add_uaccess_safe(struct objtool_file *file)
1243 {
1244 	struct symbol *func;
1245 	const char **name;
1246 
1247 	if (!opts.uaccess)
1248 		return;
1249 
1250 	for (name = uaccess_safe_builtin; *name; name++) {
1251 		func = find_symbol_by_name(file->elf, *name);
1252 		if (!func)
1253 			continue;
1254 
1255 		func->uaccess_safe = true;
1256 	}
1257 }
1258 
1259 /*
1260  * FIXME: For now, just ignore any alternatives which add retpolines.  This is
1261  * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
1262  * But it at least allows objtool to understand the control flow *around* the
1263  * retpoline.
1264  */
1265 static int add_ignore_alternatives(struct objtool_file *file)
1266 {
1267 	struct section *sec;
1268 	struct reloc *reloc;
1269 	struct instruction *insn;
1270 
1271 	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
1272 	if (!sec)
1273 		return 0;
1274 
1275 	list_for_each_entry(reloc, &sec->reloc_list, list) {
1276 		if (reloc->sym->type != STT_SECTION) {
1277 			WARN("unexpected relocation symbol type in %s", sec->name);
1278 			return -1;
1279 		}
1280 
1281 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
1282 		if (!insn) {
1283 			WARN("bad .discard.ignore_alts entry");
1284 			return -1;
1285 		}
1286 
1287 		insn->ignore_alts = true;
1288 	}
1289 
1290 	return 0;
1291 }
1292 
/* Weak default: arch code overrides this to recognize retpoline thunk symbols. */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}
1297 
/* Weak default: arch code overrides this to recognize return thunk symbols. */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}
1302 
1303 #define NEGATIVE_RELOC	((void *)-1L)
1304 
1305 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1306 {
1307 	if (insn->reloc == NEGATIVE_RELOC)
1308 		return NULL;
1309 
1310 	if (!insn->reloc) {
1311 		if (!file)
1312 			return NULL;
1313 
1314 		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1315 						       insn->offset, insn->len);
1316 		if (!insn->reloc) {
1317 			insn->reloc = NEGATIVE_RELOC;
1318 			return NULL;
1319 		}
1320 	}
1321 
1322 	return insn->reloc;
1323 }
1324 
1325 static void remove_insn_ops(struct instruction *insn)
1326 {
1327 	struct stack_op *op, *tmp;
1328 
1329 	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
1330 		list_del(&op->list);
1331 		free(op);
1332 	}
1333 }
1334 
/*
 * Classify a call site and route it onto the appropriate per-file list
 * (static call, retpoline call, mcount location, plain call), optionally
 * rewriting instrumentation calls in noinstr text to NOPs/RETs in the ELF.
 * @sibling: true for a tail-call (JMP), false for a regular CALL.
 */
static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn->call_dest;

	/*
	 * NOTE(review): when call_dest is NULL this assumes the insn has a
	 * reloc; a NULL reloc here would dereference NULL -- confirm all
	 * callers guarantee one or the other.
	 */
	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction. For now, don't
	 * annotate it. (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		/* Kill the reloc first so the rewritten bytes stay as written. */
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
			               : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);
		if (opts.mnop) {
			/* Same two-step rewrite as above: drop reloc, then NOP the call. */
			if (reloc) {
				reloc->type = R_NONE;
				elf_write_reloc(file->elf, reloc);
			}

			elf_write_insn(file->elf, insn->sec,
				       insn->offset, insn->len,
				       arch_nop_insn(insn->len));

			insn->type = INSN_NOP;
		}

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}

	if (insn->type == INSN_CALL && !insn->sec->init)
		list_add_tail(&insn->call_node, &file->call_list);

	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;
}
1420 
1421 static void add_call_dest(struct objtool_file *file, struct instruction *insn,
1422 			  struct symbol *dest, bool sibling)
1423 {
1424 	insn->call_dest = dest;
1425 	if (!dest)
1426 		return;
1427 
1428 	/*
1429 	 * Whatever stack impact regular CALLs have, should be undone
1430 	 * by the RETURN of the called function.
1431 	 *
1432 	 * Annotated intra-function calls retain the stack_ops but
1433 	 * are converted to JUMP, see read_intra_function_calls().
1434 	 */
1435 	remove_insn_ops(insn);
1436 
1437 	annotate_call_site(file, insn, sibling);
1438 }
1439 
1440 static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1441 {
1442 	/*
1443 	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1444 	 * so convert them accordingly.
1445 	 */
1446 	switch (insn->type) {
1447 	case INSN_CALL:
1448 		insn->type = INSN_CALL_DYNAMIC;
1449 		break;
1450 	case INSN_JUMP_UNCONDITIONAL:
1451 		insn->type = INSN_JUMP_DYNAMIC;
1452 		break;
1453 	case INSN_JUMP_CONDITIONAL:
1454 		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1455 		break;
1456 	default:
1457 		return;
1458 	}
1459 
1460 	insn->retpoline_safe = true;
1461 
1462 	/*
1463 	 * Whatever stack impact regular CALLs have, should be undone
1464 	 * by the RETURN of the called function.
1465 	 *
1466 	 * Annotated intra-function calls retain the stack_ops but
1467 	 * are converted to JUMP, see read_intra_function_calls().
1468 	 */
1469 	remove_insn_ops(insn);
1470 
1471 	annotate_call_site(file, insn, false);
1472 }
1473 
1474 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
1475 {
1476 	/*
1477 	 * Return thunk tail calls are really just returns in disguise,
1478 	 * so convert them accordingly.
1479 	 */
1480 	insn->type = INSN_RETURN;
1481 	insn->retpoline_safe = true;
1482 
1483 	if (add)
1484 		list_add_tail(&insn->call_node, &file->return_thunk_list);
1485 }
1486 
1487 static bool is_first_func_insn(struct objtool_file *file,
1488 			       struct instruction *insn, struct symbol *sym)
1489 {
1490 	if (insn->offset == sym->offset)
1491 		return true;
1492 
1493 	/* Allow direct CALL/JMP past ENDBR */
1494 	if (opts.ibt) {
1495 		struct instruction *prev = prev_insn_same_sym(file, insn);
1496 
1497 		if (prev && prev->type == INSN_ENDBR &&
1498 		    insn->offset == sym->offset + prev->len)
1499 			return true;
1500 	}
1501 
1502 	return false;
1503 }
1504 
1505 /*
1506  * A sibling call is a tail-call to another symbol -- to differentiate from a
1507  * recursive tail-call which is to the same symbol.
1508  */
1509 static bool jump_is_sibling_call(struct objtool_file *file,
1510 				 struct instruction *from, struct instruction *to)
1511 {
1512 	struct symbol *fs = from->sym;
1513 	struct symbol *ts = to->sym;
1514 
1515 	/* Not a sibling call if from/to a symbol hole */
1516 	if (!fs || !ts)
1517 		return false;
1518 
1519 	/* Not a sibling call if not targeting the start of a symbol. */
1520 	if (!is_first_func_insn(file, to, ts))
1521 		return false;
1522 
1523 	/* Disallow sibling calls into STT_NOTYPE */
1524 	if (ts->type == STT_NOTYPE)
1525 		return false;
1526 
1527 	/* Must not be self to be a sibling */
1528 	return fs->pfunc != ts->pfunc;
1529 }
1530 
1531 /*
1532  * Find the destination instructions for all jumps.
1533  */
1534 static int add_jump_destinations(struct objtool_file *file)
1535 {
1536 	struct instruction *insn, *jump_dest;
1537 	struct reloc *reloc;
1538 	struct section *dest_sec;
1539 	unsigned long dest_off;
1540 
1541 	for_each_insn(file, insn) {
1542 		if (insn->jump_dest) {
1543 			/*
1544 			 * handle_group_alt() may have previously set
1545 			 * 'jump_dest' for some alternatives.
1546 			 */
1547 			continue;
1548 		}
1549 		if (!is_static_jump(insn))
1550 			continue;
1551 
1552 		reloc = insn_reloc(file, insn);
1553 		if (!reloc) {
1554 			dest_sec = insn->sec;
1555 			dest_off = arch_jump_destination(insn);
1556 		} else if (reloc->sym->type == STT_SECTION) {
1557 			dest_sec = reloc->sym->sec;
1558 			dest_off = arch_dest_reloc_offset(reloc->addend);
1559 		} else if (reloc->sym->retpoline_thunk) {
1560 			add_retpoline_call(file, insn);
1561 			continue;
1562 		} else if (reloc->sym->return_thunk) {
1563 			add_return_call(file, insn, true);
1564 			continue;
1565 		} else if (insn_func(insn)) {
1566 			/*
1567 			 * External sibling call or internal sibling call with
1568 			 * STT_FUNC reloc.
1569 			 */
1570 			add_call_dest(file, insn, reloc->sym, true);
1571 			continue;
1572 		} else if (reloc->sym->sec->idx) {
1573 			dest_sec = reloc->sym->sec;
1574 			dest_off = reloc->sym->sym.st_value +
1575 				   arch_dest_reloc_offset(reloc->addend);
1576 		} else {
1577 			/* non-func asm code jumping to another file */
1578 			continue;
1579 		}
1580 
1581 		jump_dest = find_insn(file, dest_sec, dest_off);
1582 		if (!jump_dest) {
1583 			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
1584 
1585 			/*
1586 			 * This is a special case for zen_untrain_ret().
1587 			 * It jumps to __x86_return_thunk(), but objtool
1588 			 * can't find the thunk's starting RET
1589 			 * instruction, because the RET is also in the
1590 			 * middle of another instruction.  Objtool only
1591 			 * knows about the outer instruction.
1592 			 */
1593 			if (sym && sym->return_thunk) {
1594 				add_return_call(file, insn, false);
1595 				continue;
1596 			}
1597 
1598 			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
1599 				  insn->sec, insn->offset, dest_sec->name,
1600 				  dest_off);
1601 			return -1;
1602 		}
1603 
1604 		/*
1605 		 * Cross-function jump.
1606 		 */
1607 		if (insn_func(insn) && insn_func(jump_dest) &&
1608 		    insn_func(insn) != insn_func(jump_dest)) {
1609 
1610 			/*
1611 			 * For GCC 8+, create parent/child links for any cold
1612 			 * subfunctions.  This is _mostly_ redundant with a
1613 			 * similar initialization in read_symbols().
1614 			 *
1615 			 * If a function has aliases, we want the *first* such
1616 			 * function in the symbol table to be the subfunction's
1617 			 * parent.  In that case we overwrite the
1618 			 * initialization done in read_symbols().
1619 			 *
1620 			 * However this code can't completely replace the
1621 			 * read_symbols() code because this doesn't detect the
1622 			 * case where the parent function's only reference to a
1623 			 * subfunction is through a jump table.
1624 			 */
1625 			if (!strstr(insn_func(insn)->name, ".cold") &&
1626 			    strstr(insn_func(jump_dest)->name, ".cold")) {
1627 				insn_func(insn)->cfunc = insn_func(jump_dest);
1628 				insn_func(jump_dest)->pfunc = insn_func(insn);
1629 			}
1630 		}
1631 
1632 		if (jump_is_sibling_call(file, insn, jump_dest)) {
1633 			/*
1634 			 * Internal sibling call without reloc or with
1635 			 * STT_SECTION reloc.
1636 			 */
1637 			add_call_dest(file, insn, insn_func(jump_dest), true);
1638 			continue;
1639 		}
1640 
1641 		insn->jump_dest = jump_dest;
1642 	}
1643 
1644 	return 0;
1645 }
1646 
/*
 * Resolve the call target at @sec + @offset, preferring an STT_FUNC symbol
 * and falling back to whatever symbol covers that offset.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	return find_func_by_offset(sec, offset) ?:
	       find_symbol_by_offset(sec, offset);
}
1657 
1658 /*
1659  * Find the destination instructions for all calls.
1660  */
1661 static int add_call_destinations(struct objtool_file *file)
1662 {
1663 	struct instruction *insn;
1664 	unsigned long dest_off;
1665 	struct symbol *dest;
1666 	struct reloc *reloc;
1667 
1668 	for_each_insn(file, insn) {
1669 		if (insn->type != INSN_CALL)
1670 			continue;
1671 
1672 		reloc = insn_reloc(file, insn);
1673 		if (!reloc) {
1674 			dest_off = arch_jump_destination(insn);
1675 			dest = find_call_destination(insn->sec, dest_off);
1676 
1677 			add_call_dest(file, insn, dest, false);
1678 
1679 			if (insn->ignore)
1680 				continue;
1681 
1682 			if (!insn->call_dest) {
1683 				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
1684 				return -1;
1685 			}
1686 
1687 			if (insn_func(insn) && insn->call_dest->type != STT_FUNC) {
1688 				WARN_FUNC("unsupported call to non-function",
1689 					  insn->sec, insn->offset);
1690 				return -1;
1691 			}
1692 
1693 		} else if (reloc->sym->type == STT_SECTION) {
1694 			dest_off = arch_dest_reloc_offset(reloc->addend);
1695 			dest = find_call_destination(reloc->sym->sec, dest_off);
1696 			if (!dest) {
1697 				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
1698 					  insn->sec, insn->offset,
1699 					  reloc->sym->sec->name,
1700 					  dest_off);
1701 				return -1;
1702 			}
1703 
1704 			add_call_dest(file, insn, dest, false);
1705 
1706 		} else if (reloc->sym->retpoline_thunk) {
1707 			add_retpoline_call(file, insn);
1708 
1709 		} else
1710 			add_call_dest(file, insn, reloc->sym, false);
1711 	}
1712 
1713 	return 0;
1714 }
1715 
1716 /*
1717  * The .alternatives section requires some extra special care over and above
1718  * other special sections because alternatives are patched in place.
1719  */
1720 static int handle_group_alt(struct objtool_file *file,
1721 			    struct special_alt *special_alt,
1722 			    struct instruction *orig_insn,
1723 			    struct instruction **new_insn)
1724 {
1725 	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
1726 	struct alt_group *orig_alt_group, *new_alt_group;
1727 	unsigned long dest_off;
1728 
1729 
1730 	orig_alt_group = malloc(sizeof(*orig_alt_group));
1731 	if (!orig_alt_group) {
1732 		WARN("malloc failed");
1733 		return -1;
1734 	}
1735 	orig_alt_group->cfi = calloc(special_alt->orig_len,
1736 				     sizeof(struct cfi_state *));
1737 	if (!orig_alt_group->cfi) {
1738 		WARN("calloc failed");
1739 		return -1;
1740 	}
1741 
1742 	last_orig_insn = NULL;
1743 	insn = orig_insn;
1744 	sec_for_each_insn_from(file, insn) {
1745 		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1746 			break;
1747 
1748 		insn->alt_group = orig_alt_group;
1749 		last_orig_insn = insn;
1750 	}
1751 	orig_alt_group->orig_group = NULL;
1752 	orig_alt_group->first_insn = orig_insn;
1753 	orig_alt_group->last_insn = last_orig_insn;
1754 
1755 
1756 	new_alt_group = malloc(sizeof(*new_alt_group));
1757 	if (!new_alt_group) {
1758 		WARN("malloc failed");
1759 		return -1;
1760 	}
1761 
1762 	if (special_alt->new_len < special_alt->orig_len) {
1763 		/*
1764 		 * Insert a fake nop at the end to make the replacement
1765 		 * alt_group the same size as the original.  This is needed to
1766 		 * allow propagate_alt_cfi() to do its magic.  When the last
1767 		 * instruction affects the stack, the instruction after it (the
1768 		 * nop) will propagate the new state to the shared CFI array.
1769 		 */
1770 		nop = malloc(sizeof(*nop));
1771 		if (!nop) {
1772 			WARN("malloc failed");
1773 			return -1;
1774 		}
1775 		memset(nop, 0, sizeof(*nop));
1776 		INIT_LIST_HEAD(&nop->alts);
1777 		INIT_LIST_HEAD(&nop->stack_ops);
1778 
1779 		nop->sec = special_alt->new_sec;
1780 		nop->offset = special_alt->new_off + special_alt->new_len;
1781 		nop->len = special_alt->orig_len - special_alt->new_len;
1782 		nop->type = INSN_NOP;
1783 		nop->sym = orig_insn->sym;
1784 		nop->alt_group = new_alt_group;
1785 		nop->ignore = orig_insn->ignore_alts;
1786 	}
1787 
1788 	if (!special_alt->new_len) {
1789 		*new_insn = nop;
1790 		goto end;
1791 	}
1792 
1793 	insn = *new_insn;
1794 	sec_for_each_insn_from(file, insn) {
1795 		struct reloc *alt_reloc;
1796 
1797 		if (insn->offset >= special_alt->new_off + special_alt->new_len)
1798 			break;
1799 
1800 		last_new_insn = insn;
1801 
1802 		insn->ignore = orig_insn->ignore_alts;
1803 		insn->sym = orig_insn->sym;
1804 		insn->alt_group = new_alt_group;
1805 
1806 		/*
1807 		 * Since alternative replacement code is copy/pasted by the
1808 		 * kernel after applying relocations, generally such code can't
1809 		 * have relative-address relocation references to outside the
1810 		 * .altinstr_replacement section, unless the arch's
1811 		 * alternatives code can adjust the relative offsets
1812 		 * accordingly.
1813 		 */
1814 		alt_reloc = insn_reloc(file, insn);
1815 		if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1816 		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1817 
1818 			WARN_FUNC("unsupported relocation in alternatives section",
1819 				  insn->sec, insn->offset);
1820 			return -1;
1821 		}
1822 
1823 		if (!is_static_jump(insn))
1824 			continue;
1825 
1826 		if (!insn->immediate)
1827 			continue;
1828 
1829 		dest_off = arch_jump_destination(insn);
1830 		if (dest_off == special_alt->new_off + special_alt->new_len) {
1831 			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);
1832 			if (!insn->jump_dest) {
1833 				WARN_FUNC("can't find alternative jump destination",
1834 					  insn->sec, insn->offset);
1835 				return -1;
1836 			}
1837 		}
1838 	}
1839 
1840 	if (!last_new_insn) {
1841 		WARN_FUNC("can't find last new alternative instruction",
1842 			  special_alt->new_sec, special_alt->new_off);
1843 		return -1;
1844 	}
1845 
1846 	if (nop)
1847 		list_add(&nop->list, &last_new_insn->list);
1848 end:
1849 	new_alt_group->orig_group = orig_alt_group;
1850 	new_alt_group->first_insn = *new_insn;
1851 	new_alt_group->last_insn = nop ? : last_new_insn;
1852 	new_alt_group->cfi = orig_alt_group->cfi;
1853 	return 0;
1854 }
1855 
1856 /*
1857  * A jump table entry can either convert a nop to a jump or a jump to a nop.
1858  * If the original instruction is a jump, make the alt entry an effective nop
1859  * by just skipping the original instruction.
1860  */
1861 static int handle_jump_alt(struct objtool_file *file,
1862 			   struct special_alt *special_alt,
1863 			   struct instruction *orig_insn,
1864 			   struct instruction **new_insn)
1865 {
1866 	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1867 	    orig_insn->type != INSN_NOP) {
1868 
1869 		WARN_FUNC("unsupported instruction at jump label",
1870 			  orig_insn->sec, orig_insn->offset);
1871 		return -1;
1872 	}
1873 
1874 	if (opts.hack_jump_label && special_alt->key_addend & 2) {
1875 		struct reloc *reloc = insn_reloc(file, orig_insn);
1876 
1877 		if (reloc) {
1878 			reloc->type = R_NONE;
1879 			elf_write_reloc(file->elf, reloc);
1880 		}
1881 		elf_write_insn(file->elf, orig_insn->sec,
1882 			       orig_insn->offset, orig_insn->len,
1883 			       arch_nop_insn(orig_insn->len));
1884 		orig_insn->type = INSN_NOP;
1885 	}
1886 
1887 	if (orig_insn->type == INSN_NOP) {
1888 		if (orig_insn->len == 2)
1889 			file->jl_nop_short++;
1890 		else
1891 			file->jl_nop_long++;
1892 
1893 		return 0;
1894 	}
1895 
1896 	if (orig_insn->len == 2)
1897 		file->jl_short++;
1898 	else
1899 		file->jl_long++;
1900 
1901 	*new_insn = list_next_entry(orig_insn, list);
1902 	return 0;
1903 }
1904 
1905 /*
1906  * Read all the special sections which have alternate instructions which can be
1907  * patched in or redirected to at runtime.  Each instruction having alternate
1908  * instruction(s) has them added to its insn->alts list, which will be
1909  * traversed in validate_branch().
1910  */
1911 static int add_special_section_alts(struct objtool_file *file)
1912 {
1913 	struct list_head special_alts;
1914 	struct instruction *orig_insn, *new_insn;
1915 	struct special_alt *special_alt, *tmp;
1916 	struct alternative *alt;
1917 	int ret;
1918 
1919 	ret = special_get_alts(file->elf, &special_alts);
1920 	if (ret)
1921 		return ret;
1922 
1923 	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
1924 
1925 		orig_insn = find_insn(file, special_alt->orig_sec,
1926 				      special_alt->orig_off);
1927 		if (!orig_insn) {
1928 			WARN_FUNC("special: can't find orig instruction",
1929 				  special_alt->orig_sec, special_alt->orig_off);
1930 			ret = -1;
1931 			goto out;
1932 		}
1933 
1934 		new_insn = NULL;
1935 		if (!special_alt->group || special_alt->new_len) {
1936 			new_insn = find_insn(file, special_alt->new_sec,
1937 					     special_alt->new_off);
1938 			if (!new_insn) {
1939 				WARN_FUNC("special: can't find new instruction",
1940 					  special_alt->new_sec,
1941 					  special_alt->new_off);
1942 				ret = -1;
1943 				goto out;
1944 			}
1945 		}
1946 
1947 		if (special_alt->group) {
1948 			if (!special_alt->orig_len) {
1949 				WARN_FUNC("empty alternative entry",
1950 					  orig_insn->sec, orig_insn->offset);
1951 				continue;
1952 			}
1953 
1954 			ret = handle_group_alt(file, special_alt, orig_insn,
1955 					       &new_insn);
1956 			if (ret)
1957 				goto out;
1958 		} else if (special_alt->jump_or_nop) {
1959 			ret = handle_jump_alt(file, special_alt, orig_insn,
1960 					      &new_insn);
1961 			if (ret)
1962 				goto out;
1963 		}
1964 
1965 		alt = malloc(sizeof(*alt));
1966 		if (!alt) {
1967 			WARN("malloc failed");
1968 			ret = -1;
1969 			goto out;
1970 		}
1971 
1972 		alt->insn = new_insn;
1973 		alt->skip_orig = special_alt->skip_orig;
1974 		orig_insn->ignore_alts |= special_alt->skip_alt;
1975 		list_add_tail(&alt->list, &orig_insn->alts);
1976 
1977 		list_del(&special_alt->list);
1978 		free(special_alt);
1979 	}
1980 
1981 	if (opts.stats) {
1982 		printf("jl\\\tNOP\tJMP\n");
1983 		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
1984 		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
1985 	}
1986 
1987 out:
1988 	return ret;
1989 }
1990 
/*
 * Add every target of the switch jump table starting at reloc @table as an
 * alternative branch destination of the dynamic jump @insn.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			    struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn_func(insn)->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		/* NOTE(review): assumes 8-byte table entries -- confirm for 32-bit. */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	/* prev_offset == 0 means not even one valid entry was found. */
	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
2046 
2047 /*
2048  * find_jump_table() - Given a dynamic jump, find the switch jump table
2049  * associated with it.
2050  */
2051 static struct reloc *find_jump_table(struct objtool_file *file,
2052 				      struct symbol *func,
2053 				      struct instruction *insn)
2054 {
2055 	struct reloc *table_reloc;
2056 	struct instruction *dest_insn, *orig_insn = insn;
2057 
2058 	/*
2059 	 * Backward search using the @first_jump_src links, these help avoid
2060 	 * much of the 'in between' code. Which avoids us getting confused by
2061 	 * it.
2062 	 */
2063 	for (;
2064 	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2065 	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2066 
2067 		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2068 			break;
2069 
2070 		/* allow small jumps within the range */
2071 		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2072 		    insn->jump_dest &&
2073 		    (insn->jump_dest->offset <= insn->offset ||
2074 		     insn->jump_dest->offset > orig_insn->offset))
2075 		    break;
2076 
2077 		table_reloc = arch_find_switch_table(file, insn);
2078 		if (!table_reloc)
2079 			continue;
2080 		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
2081 		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2082 			continue;
2083 
2084 		return table_reloc;
2085 	}
2086 
2087 	return NULL;
2088 }
2089 
2090 /*
2091  * First pass: Mark the head of each jump table so that in the next pass,
2092  * we know when a given jump table ends and the next one starts.
2093  */
2094 static void mark_func_jump_tables(struct objtool_file *file,
2095 				    struct symbol *func)
2096 {
2097 	struct instruction *insn, *last = NULL;
2098 	struct reloc *reloc;
2099 
2100 	func_for_each_insn(file, func, insn) {
2101 		if (!last)
2102 			last = insn;
2103 
2104 		/*
2105 		 * Store back-pointers for unconditional forward jumps such
2106 		 * that find_jump_table() can back-track using those and
2107 		 * avoid some potentially confusing code.
2108 		 */
2109 		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
2110 		    insn->offset > last->offset &&
2111 		    insn->jump_dest->offset > insn->offset &&
2112 		    !insn->jump_dest->first_jump_src) {
2113 
2114 			insn->jump_dest->first_jump_src = insn;
2115 			last = insn->jump_dest;
2116 		}
2117 
2118 		if (insn->type != INSN_JUMP_DYNAMIC)
2119 			continue;
2120 
2121 		reloc = find_jump_table(file, func, insn);
2122 		if (reloc) {
2123 			reloc->jump_table_start = true;
2124 			insn->jump_table = reloc;
2125 		}
2126 	}
2127 }
2128 
2129 static int add_func_jump_tables(struct objtool_file *file,
2130 				  struct symbol *func)
2131 {
2132 	struct instruction *insn;
2133 	int ret;
2134 
2135 	func_for_each_insn(file, func, insn) {
2136 		if (!insn->jump_table)
2137 			continue;
2138 
2139 		ret = add_jump_table(file, insn, insn->jump_table);
2140 		if (ret)
2141 			return ret;
2142 	}
2143 
2144 	return 0;
2145 }
2146 
2147 /*
2148  * For some switch statements, gcc generates a jump table in the .rodata
2149  * section which contains a list of addresses within the function to jump to.
2150  * This finds these jump tables and adds them to the insn->alts lists.
2151  */
2152 static int add_jump_table_alts(struct objtool_file *file)
2153 {
2154 	struct section *sec;
2155 	struct symbol *func;
2156 	int ret;
2157 
2158 	if (!file->rodata)
2159 		return 0;
2160 
2161 	for_each_sec(file, sec) {
2162 		list_for_each_entry(func, &sec->symbol_list, list) {
2163 			if (func->type != STT_FUNC)
2164 				continue;
2165 
2166 			mark_func_jump_tables(file, func);
2167 			ret = add_func_jump_tables(file, func);
2168 			if (ret)
2169 				return ret;
2170 		}
2171 	}
2172 
2173 	return 0;
2174 }
2175 
/* Reset @state to the arch's CFI state at function entry (initial_func_cfi). */
static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	/* Initial stack size equals the entry CFA offset. */
	state->stack_size = initial_func_cfi.cfa.offset;
}
2183 
/*
 * Parse .discard.unwind_hints and attach the annotated CFI state (or
 * save/restore/entry flags) to each hinted instruction.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec, *relocsec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* The section must be a whole number of unwind_hint records. */
	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* The i-th reloc locates the instruction the hint applies to. */
		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && sym->bind == STB_GLOBAL) {
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
						  insn->sec, insn->offset);
				}

				insn->entry = 1;
			}
		}

		/* ENTRY hints are downgraded to CALL, keeping the entry flag. */
		if (hint->type == UNWIND_HINT_TYPE_ENTRY) {
			hint->type = UNWIND_HINT_TYPE_CALL;
			insn->entry = 1;
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from the instruction's existing CFI, if it has one. */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.end = hint->end;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
2279 
2280 static int read_noendbr_hints(struct objtool_file *file)
2281 {
2282 	struct section *sec;
2283 	struct instruction *insn;
2284 	struct reloc *reloc;
2285 
2286 	sec = find_section_by_name(file->elf, ".rela.discard.noendbr");
2287 	if (!sec)
2288 		return 0;
2289 
2290 	list_for_each_entry(reloc, &sec->reloc_list, list) {
2291 		insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
2292 		if (!insn) {
2293 			WARN("bad .discard.noendbr entry");
2294 			return -1;
2295 		}
2296 
2297 		insn->noendbr = 1;
2298 	}
2299 
2300 	return 0;
2301 }
2302 
2303 static int read_retpoline_hints(struct objtool_file *file)
2304 {
2305 	struct section *sec;
2306 	struct instruction *insn;
2307 	struct reloc *reloc;
2308 
2309 	sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
2310 	if (!sec)
2311 		return 0;
2312 
2313 	list_for_each_entry(reloc, &sec->reloc_list, list) {
2314 		if (reloc->sym->type != STT_SECTION) {
2315 			WARN("unexpected relocation symbol type in %s", sec->name);
2316 			return -1;
2317 		}
2318 
2319 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
2320 		if (!insn) {
2321 			WARN("bad .discard.retpoline_safe entry");
2322 			return -1;
2323 		}
2324 
2325 		if (insn->type != INSN_JUMP_DYNAMIC &&
2326 		    insn->type != INSN_CALL_DYNAMIC &&
2327 		    insn->type != INSN_RETURN &&
2328 		    insn->type != INSN_NOP) {
2329 			WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
2330 				  insn->sec, insn->offset);
2331 			return -1;
2332 		}
2333 
2334 		insn->retpoline_safe = true;
2335 	}
2336 
2337 	return 0;
2338 }
2339 
2340 static int read_instr_hints(struct objtool_file *file)
2341 {
2342 	struct section *sec;
2343 	struct instruction *insn;
2344 	struct reloc *reloc;
2345 
2346 	sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
2347 	if (!sec)
2348 		return 0;
2349 
2350 	list_for_each_entry(reloc, &sec->reloc_list, list) {
2351 		if (reloc->sym->type != STT_SECTION) {
2352 			WARN("unexpected relocation symbol type in %s", sec->name);
2353 			return -1;
2354 		}
2355 
2356 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
2357 		if (!insn) {
2358 			WARN("bad .discard.instr_end entry");
2359 			return -1;
2360 		}
2361 
2362 		insn->instr--;
2363 	}
2364 
2365 	sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
2366 	if (!sec)
2367 		return 0;
2368 
2369 	list_for_each_entry(reloc, &sec->reloc_list, list) {
2370 		if (reloc->sym->type != STT_SECTION) {
2371 			WARN("unexpected relocation symbol type in %s", sec->name);
2372 			return -1;
2373 		}
2374 
2375 		insn = find_insn(file, reloc->sym->sec, reloc->addend);
2376 		if (!insn) {
2377 			WARN("bad .discard.instr_begin entry");
2378 			return -1;
2379 		}
2380 
2381 		insn->instr++;
2382 	}
2383 
2384 	return 0;
2385 }
2386 
/*
 * Convert ANNOTATE_INTRA_FUNCTION_CALL sites from INSN_CALL to
 * INSN_JUMP_UNCONDITIONAL and resolve their jump destination, so the
 * validator follows them as jumps while still accounting for the
 * return-address push via the retained stack_op.
 */
static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		/* The annotation is only valid on direct calls. */
		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

		/* The destination must be inside the same section. */
		dest_off = arch_jump_destination(insn);
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}
2437 
2438 /*
2439  * Return true if name matches an instrumentation function, where calls to that
2440  * function from noinstr code can safely be removed, but compilers won't do so.
2441  */
/*
 * Return true if @name matches an instrumentation function, where calls to
 * that function from noinstr code can safely be removed, but compilers won't
 * do so.
 *
 * Prefix lengths use sizeof(literal)-1 instead of hand-counted constants so
 * the prefix string and its length cannot silently drift apart.
 */
static bool is_profiling_func(const char *name)
{
	/*
	 * Many compilers cannot disable KCOV with a function attribute.
	 */
	if (!strncmp(name, "__sanitizer_cov_", sizeof("__sanitizer_cov_") - 1))
		return true;

	/*
	 * Some compilers currently do not remove __tsan_func_entry/exit nor
	 * __tsan_atomic_signal_fence (used for barrier instrumentation) with
	 * the __no_sanitize_thread attribute, remove them. Once the kernel's
	 * minimum Clang version is 14.0, this can be removed.
	 */
	if (!strncmp(name, "__tsan_func_", sizeof("__tsan_func_") - 1) ||
	    !strcmp(name, "__tsan_atomic_signal_fence"))
		return true;

	return false;
}
2462 
2463 static int classify_symbols(struct objtool_file *file)
2464 {
2465 	struct section *sec;
2466 	struct symbol *func;
2467 
2468 	for_each_sec(file, sec) {
2469 		list_for_each_entry(func, &sec->symbol_list, list) {
2470 			if (func->bind != STB_GLOBAL)
2471 				continue;
2472 
2473 			if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2474 				     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2475 				func->static_call_tramp = true;
2476 
2477 			if (arch_is_retpoline(func))
2478 				func->retpoline_thunk = true;
2479 
2480 			if (arch_is_rethunk(func))
2481 				func->return_thunk = true;
2482 
2483 			if (arch_ftrace_match(func->name))
2484 				func->fentry = true;
2485 
2486 			if (is_profiling_func(func->name))
2487 				func->profiling_func = true;
2488 		}
2489 	}
2490 
2491 	return 0;
2492 }
2493 
2494 static void mark_rodata(struct objtool_file *file)
2495 {
2496 	struct section *sec;
2497 	bool found = false;
2498 
2499 	/*
2500 	 * Search for the following rodata sections, each of which can
2501 	 * potentially contain jump tables:
2502 	 *
2503 	 * - .rodata: can contain GCC switch tables
2504 	 * - .rodata.<func>: same, if -fdata-sections is being used
2505 	 * - .rodata..c_jump_table: contains C annotated jump tables
2506 	 *
2507 	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2508 	 */
2509 	for_each_sec(file, sec) {
2510 		if (!strncmp(sec->name, ".rodata", 7) &&
2511 		    !strstr(sec->name, ".str1.")) {
2512 			sec->rodata = true;
2513 			found = true;
2514 		}
2515 	}
2516 
2517 	file->rodata = found;
2518 }
2519 
/*
 * Top-level decode pass: decode all instructions and resolve annotations,
 * destinations and hints.  The ordering of the steps below is load-bearing;
 * see the inline comments for the individual dependencies.
 */
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = init_pv_ops(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	ret = decode_instructions(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	/*
	 * Must be before read_unwind_hints() since that needs insn->noendbr.
	 */
	ret = read_noendbr_hints(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
		ret = add_special_section_alts(file);
		if (ret)
			return ret;
	}

	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	ret = add_dead_ends(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	return 0;
}
2607 
2608 static bool is_fentry_call(struct instruction *insn)
2609 {
2610 	if (insn->type == INSN_CALL &&
2611 	    insn->call_dest &&
2612 	    insn->call_dest->fentry)
2613 		return true;
2614 
2615 	return false;
2616 }
2617 
2618 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2619 {
2620 	struct cfi_state *cfi = &state->cfi;
2621 	int i;
2622 
2623 	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2624 		return true;
2625 
2626 	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2627 		return true;
2628 
2629 	if (cfi->stack_size != initial_func_cfi.cfa.offset)
2630 		return true;
2631 
2632 	for (i = 0; i < CFI_NUM_REGS; i++) {
2633 		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2634 		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2635 			return true;
2636 	}
2637 
2638 	return false;
2639 }
2640 
2641 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2642 				int expected_offset)
2643 {
2644 	return reg->base == CFI_CFA &&
2645 	       reg->offset == expected_offset;
2646 }
2647 
2648 static bool has_valid_stack_frame(struct insn_state *state)
2649 {
2650 	struct cfi_state *cfi = &state->cfi;
2651 
2652 	if (cfi->cfa.base == CFI_BP &&
2653 	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2654 	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2655 		return true;
2656 
2657 	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2658 		return true;
2659 
2660 	return false;
2661 }
2662 
2663 static int update_cfi_state_regs(struct instruction *insn,
2664 				  struct cfi_state *cfi,
2665 				  struct stack_op *op)
2666 {
2667 	struct cfi_reg *cfa = &cfi->cfa;
2668 
2669 	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2670 		return 0;
2671 
2672 	/* push */
2673 	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2674 		cfa->offset += 8;
2675 
2676 	/* pop */
2677 	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2678 		cfa->offset -= 8;
2679 
2680 	/* add immediate to sp */
2681 	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2682 	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2683 		cfa->offset -= op->src.offset;
2684 
2685 	return 0;
2686 }
2687 
2688 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2689 {
2690 	if (arch_callee_saved_reg(reg) &&
2691 	    cfi->regs[reg].base == CFI_UNDEFINED) {
2692 		cfi->regs[reg].base = base;
2693 		cfi->regs[reg].offset = offset;
2694 	}
2695 }
2696 
2697 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2698 {
2699 	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2700 	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2701 }
2702 
2703 /*
2704  * A note about DRAP stack alignment:
2705  *
2706  * GCC has the concept of a DRAP register, which is used to help keep track of
2707  * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
2708  * register.  The typical DRAP pattern is:
2709  *
2710  *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
2711  *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
2712  *   41 ff 72 f8		pushq  -0x8(%r10)
2713  *   55				push   %rbp
2714  *   48 89 e5			mov    %rsp,%rbp
2715  *				(more pushes)
2716  *   41 52			push   %r10
2717  *				...
2718  *   41 5a			pop    %r10
2719  *				(more pops)
2720  *   5d				pop    %rbp
2721  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2722  *   c3				retq
2723  *
2724  * There are some variations in the epilogues, like:
2725  *
2726  *   5b				pop    %rbx
2727  *   41 5a			pop    %r10
2728  *   41 5c			pop    %r12
2729  *   41 5d			pop    %r13
2730  *   41 5e			pop    %r14
2731  *   c9				leaveq
2732  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2733  *   c3				retq
2734  *
2735  * and:
2736  *
2737  *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
2738  *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
2739  *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
2740  *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
2741  *   c9				leaveq
2742  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2743  *   c3				retq
2744  *
2745  * Sometimes r13 is used as the DRAP register, in which case it's saved and
2746  * restored beforehand:
2747  *
2748  *   41 55			push   %r13
2749  *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
2750  *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
2751  *				...
2752  *   49 8d 65 f0		lea    -0x10(%r13),%rsp
2753  *   41 5d			pop    %r13
2754  *   c3				retq
2755  */
/*
 * Apply the effect of one stack operation @op of @insn to the CFI state
 * @cfi: track the CFA (base register + offset), the stack size, and the
 * save slots of callee-saved registers, including the GCC DRAP patterns
 * described in the comment above.  @next_insn is only consulted to
 * suppress the "unsupported stack register modification" warning when the
 * following instruction carries an unwind hint.  Returns 0 on success,
 * -1 on an unsupported/unknown stack operation.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
			return -1;
		}
		return 0;
	}

	/* REGS hint regions use the simplified tracker. */
	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {

				/* lea disp(%rbp), %rsp */
				cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				break;
			}

			if (!cfi->drap && op->src.reg == CFI_SP &&
			    op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {

				/* lea disp(%rsp), %rbp */
				cfa->base = CFI_BP;
				cfa->offset -= op->src.offset;
				cfi->bp_scratch = false;
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				 /* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_FUNC("unsupported stack register modification",
					  insn->sec, insn->offset);
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_FUNC("unsupported stack pointer realignment",
					  insn->sec, insn->offset);
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_FUNC("unknown stack-related instruction",
				  insn->sec, insn->offset);
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_FUNC("unknown stack-related memory operation",
				  insn->sec, insn->offset);
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_FUNC("unknown stack-related instruction",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
3177 
3178 /*
3179  * The stack layouts of alternatives instructions can sometimes diverge when
3180  * they have stack modifications.  That's fine as long as the potential stack
3181  * layouts don't conflict at any given potential instruction boundary.
3182  *
3183  * Flatten the CFIs of the different alternative code streams (both original
3184  * and replacement) into a single shared CFI array which can be used to detect
3185  * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3186  */
3187 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3188 {
3189 	struct cfi_state **alt_cfi;
3190 	int group_off;
3191 
3192 	if (!insn->alt_group)
3193 		return 0;
3194 
3195 	if (!insn->cfi) {
3196 		WARN("CFI missing");
3197 		return -1;
3198 	}
3199 
3200 	alt_cfi = insn->alt_group->cfi;
3201 	group_off = insn->offset - insn->alt_group->first_insn->offset;
3202 
3203 	if (!alt_cfi[group_off]) {
3204 		alt_cfi[group_off] = insn->cfi;
3205 	} else {
3206 		if (cficmp(alt_cfi[group_off], insn->cfi)) {
3207 			WARN_FUNC("stack layout conflict in alternatives",
3208 				  insn->sec, insn->offset);
3209 			return -1;
3210 		}
3211 	}
3212 
3213 	return 0;
3214 }
3215 
/*
 * Apply all stack_ops of @insn to @state, and (inside alternatives only)
 * maintain the PUSHF/POPF uaccess tracking.  Returns non-zero on error.
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		/* The uaccess PUSHF/POPF tracking only applies in alternatives. */
		if (!insn->alt_group)
			continue;

		/*
		 * uaccess_stack is a bit-stack of saved uaccess states:
		 * PUSHF shifts the current state in at bit 0; the stack is
		 * "primed" with a sentinel 1 so emptiness is detectable.
		 */
		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				/* sentinel would be shifted out - 31 levels max */
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack  |= state->uaccess;
		}

		/* POPF restores the most recently pushed uaccess state. */
		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}
3254 
/*
 * Compare @insn's recorded CFI against @cfi2 (the state reached via another
 * code path).  On the first mismatching aspect (CFA, register save slots,
 * type, or DRAP state) a warning is printed and false is returned; true
 * means the two states match.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* Report only the first differing register. */
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}
3304 
3305 static inline bool func_uaccess_safe(struct symbol *func)
3306 {
3307 	if (func)
3308 		return func->uaccess_safe;
3309 
3310 	return false;
3311 }
3312 
/*
 * Best-effort name of a call destination, for warning messages.  For calls
 * with no resolved call_dest, a "pv_ops[idx]" pseudo-name is synthesized
 * when the relocation targets the pv_ops array; otherwise "{dynamic}".
 *
 * NOTE(review): the return value may point at the static pvname[] buffer,
 * which is overwritten by the next call — use it before calling again.
 * NOTE(review): insn_reloc() is passed a NULL file here — presumably it
 * only returns a reloc already cached on the insn; verify against
 * insn_reloc()'s definition.
 */
static inline const char *call_dest_name(struct instruction *insn)
{
	static char pvname[19];
	struct reloc *rel;
	int idx;

	if (insn->call_dest)
		return insn->call_dest->name;

	rel = insn_reloc(NULL, insn);
	if (rel && !strcmp(rel->sym->name, "pv_ops")) {
		idx = (rel->addend / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}
3331 
/*
 * Check whether an indirect call through the pv_ops array is noinstr-clean,
 * i.e. every possible target of that pv_ops slot lives in a noinstr section.
 * The per-slot result is computed once and cached in file->pv_ops[idx].clean.
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *rel;
	int idx;

	rel = insn_reloc(file, insn);
	if (!rel || strcmp(rel->sym->name, "pv_ops"))
		return false;

	idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));

	/* Already validated (or invalidated below on a previous pass). */
	if (file->pv_ops[idx].clean)
		return true;

	/* Assume clean, then disprove by scanning every registered target. */
	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}
3358 
3359 static inline bool noinstr_call_dest(struct objtool_file *file,
3360 				     struct instruction *insn,
3361 				     struct symbol *func)
3362 {
3363 	/*
3364 	 * We can't deal with indirect function calls at present;
3365 	 * assume they're instrumented.
3366 	 */
3367 	if (!func) {
3368 		if (file->pv_ops)
3369 			return pv_call_dest(file, insn);
3370 
3371 		return false;
3372 	}
3373 
3374 	/*
3375 	 * If the symbol is from a noinstr section; we good.
3376 	 */
3377 	if (func->sec->noinstr)
3378 		return true;
3379 
3380 	/*
3381 	 * If the symbol is a static_call trampoline, we can't tell.
3382 	 */
3383 	if (func->static_call_tramp)
3384 		return true;
3385 
3386 	/*
3387 	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3388 	 * something 'BAD' happened. At the risk of taking the machine down,
3389 	 * let them proceed to get the message out.
3390 	 */
3391 	if (!strncmp(func->name, "__ubsan_handle_", 15))
3392 		return true;
3393 
3394 	return false;
3395 }
3396 
/*
 * Validate the state at a (direct or indirect) call site:
 *
 *  - noinstr code must not call instrumented code;
 *  - no calls with AC set (UACCESS enabled) unless the callee is
 *    marked uaccess-safe;
 *  - no calls with DF set.
 *
 * Returns 1 (after WARNing) on the first violation, 0 when the call is OK.
 */
static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	/* state->instr <= 0: instrumentation is not currently enabled */
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn->call_dest)) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	return 0;
}
3422 
/*
 * A sibling (tail) call is subject to the same checks as a regular call,
 * plus: when made from inside a callable function the stack frame must
 * not have been modified, otherwise the callee's eventual return would
 * unwind through a bogus frame.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
				insn->sec, insn->offset);
		return 1;
	}

	return validate_call(file, insn, state);
}
3435 
/*
 * Validate the state at a return instruction:
 *
 *  - noinstr code must not return with instrumentation enabled;
 *  - AC (UACCESS) must be clear, unless returning from a uaccess-safe
 *    function, which conversely must NOT have cleared it;
 *  - DF must be clear;
 *  - the stack frame must have been fully unwound;
 *  - BP must not have been used as a scratch register.
 *
 * Returns 1 (after WARNing) on the first violation, 0 otherwise.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}
3476 
3477 static struct instruction *next_insn_to_validate(struct objtool_file *file,
3478 						 struct instruction *insn)
3479 {
3480 	struct alt_group *alt_group = insn->alt_group;
3481 
3482 	/*
3483 	 * Simulate the fact that alternatives are patched in-place.  When the
3484 	 * end of a replacement alt_group is reached, redirect objtool flow to
3485 	 * the end of the original alt_group.
3486 	 */
3487 	if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
3488 		return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3489 
3490 	return next_insn_same_sec(file, insn);
3491 }
3492 
/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/objtool.txt.
 *
 * @func:  the function being validated (may be NULL for unattached code);
 *         used to detect fall-through into another function and to gate
 *         the "redundant CLAC/CLD" checks.
 * @state: the instruction state at entry; passed BY VALUE so that each
 *         recursive branch explores its own private copy.
 *
 * Returns 0 on success, non-zero (after WARNing) on the first violation.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		/* Fell through into a different function? */
		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (!strncmp(func->name, "__cfi_", 6) ||
			    !strncmp(func->name, "__pfx_", 6))
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn_func(insn)->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		/*
		 * Branch coverage is tracked per uaccess state: the same
		 * instruction may need visiting once with AC clear and once
		 * with AC set, hence the shifted visited bit.
		 */
		visited = VISITED_BRANCH << state.uaccess;
		if (insn->visited & VISITED_BRANCH_MASK) {
			/* Re-visit: the CFI state must agree with last time. */
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			/* Already covered for this uaccess state: done. */
			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			if (insn->restore) {
				struct instruction *save_insn, *i;

				/*
				 * CFI restore hint: scan backwards for the
				 * matching save and reuse its recorded CFI.
				 */
				i = insn;
				save_insn = NULL;

				sym_for_each_insn_continue_reverse(file, func, i) {
					if (i->save) {
						save_insn = i;
						break;
					}
				}

				if (!save_insn) {
					WARN_FUNC("no corresponding CFI save for CFI restore",
						  sec, insn->offset);
					return 1;
				}

				if (!save_insn->visited) {
					WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
						  sec, insn->offset);
					return 1;
				}

				insn->cfi = save_insn->cfi;
				nr_cfi_reused++;
			}

			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			/* Share CFI objects between identical states. */
			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		/* Validate every alternative; maybe skip the original. */
		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		/* Apply the instruction's stack-op effects to the state. */
		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			if (opts.stackval && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			/* A call that never returns ends this path. */
			if (insn->dead_end)
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				/* Recurse into the branch target. */
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			/* Unconditional jump: nothing falls through. */
			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			/*
			 * Only allowed in callable functions when the code
			 * afterwards is annotated with an unwind hint.
			 */
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			/* An undefined CFA means the path legitimately ends. */
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}
3740 
3741 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3742 {
3743 	struct instruction *insn;
3744 	struct insn_state state;
3745 	int ret, warnings = 0;
3746 
3747 	if (!file->hints)
3748 		return 0;
3749 
3750 	init_insn_state(file, &state, sec);
3751 
3752 	if (sec) {
3753 		insn = find_insn(file, sec, 0);
3754 		if (!insn)
3755 			return 0;
3756 	} else {
3757 		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
3758 	}
3759 
3760 	while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
3761 		if (insn->hint && !insn->visited && !insn->ignore) {
3762 			ret = validate_branch(file, insn_func(insn), insn, state);
3763 			if (ret && opts.backtrace)
3764 				BT_FUNC("<=== (hint)", insn);
3765 			warnings += ret;
3766 		}
3767 
3768 		insn = list_next_entry(insn, list);
3769 	}
3770 
3771 	return warnings;
3772 }
3773 
/*
 * Validate rethunk entry constraint: must untrain RET before the first RET.
 *
 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes
 * before an actual RET instruction.
 *
 * Return values: 0 = all paths untrain before RET, 1 = a path hits RET (or
 * an indirect branch) too early, negative = internal error.
 */
static int validate_entry(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *next, *dest;
	int ret, warnings = 0;

	for (;;) {
		next = next_insn_to_validate(file, insn);

		/* Already proven safe on a previous traversal. */
		if (insn->visited & VISITED_ENTRY)
			return 0;

		insn->visited |= VISITED_ENTRY;

		/* Alternatives: every variant must satisfy the constraint. */
		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			struct alternative *alt;
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_entry(file, alt->insn);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		switch (insn->type) {

		case INSN_CALL_DYNAMIC:
		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			/* Can't follow an indirect branch; it may RET. */
			WARN_FUNC("early indirect call", insn->sec, insn->offset);
			return 1;

		case INSN_JUMP_UNCONDITIONAL:
		case INSN_JUMP_CONDITIONAL:
			if (!is_sibling_call(insn)) {
				if (!insn->jump_dest) {
					WARN_FUNC("unresolved jump target after linking?!?",
						  insn->sec, insn->offset);
					return -1;
				}
				ret = validate_entry(file, insn->jump_dest);
				if (ret) {
					if (opts.backtrace) {
						BT_FUNC("(branch%s)", insn,
							insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
					}
					return ret;
				}

				if (insn->type == INSN_JUMP_UNCONDITIONAL)
					return 0;

				break;
			}

			/* Sibling calls are handled like direct calls. */
			/* fallthrough */
		case INSN_CALL:
			dest = find_insn(file, insn->call_dest->sec,
					 insn->call_dest->offset);
			if (!dest) {
				WARN("Unresolved function after linking!?: %s",
				     insn->call_dest->name);
				return -1;
			}

			ret = validate_entry(file, dest);
			if (ret) {
				if (opts.backtrace)
					BT_FUNC("(call)", insn);
				return ret;
			}
			/*
			 * If a call returns without error, it must have seen UNTRAIN_RET.
			 * Therefore any non-error return is a success.
			 */
			return 0;

		case INSN_RETURN:
			WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset);
			return 1;

		case INSN_NOP:
			/* A retpoline_safe NOP marks ANNOTATE_UNRET_END. */
			if (insn->retpoline_safe)
				return 0;
			break;

		default:
			break;
		}

		if (!next) {
			WARN_FUNC("teh end!", insn->sec, insn->offset);
			return -1;
		}
		insn = next;
	}

	return warnings;
}
3888 
3889 /*
3890  * Validate that all branches starting at 'insn->entry' encounter UNRET_END
3891  * before RET.
3892  */
3893 static int validate_unret(struct objtool_file *file)
3894 {
3895 	struct instruction *insn;
3896 	int ret, warnings = 0;
3897 
3898 	for_each_insn(file, insn) {
3899 		if (!insn->entry)
3900 			continue;
3901 
3902 		ret = validate_entry(file, insn);
3903 		if (ret < 0) {
3904 			WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
3905 			return ret;
3906 		}
3907 		warnings += ret;
3908 	}
3909 
3910 	return warnings;
3911 }
3912 
3913 static int validate_retpoline(struct objtool_file *file)
3914 {
3915 	struct instruction *insn;
3916 	int warnings = 0;
3917 
3918 	for_each_insn(file, insn) {
3919 		if (insn->type != INSN_JUMP_DYNAMIC &&
3920 		    insn->type != INSN_CALL_DYNAMIC &&
3921 		    insn->type != INSN_RETURN)
3922 			continue;
3923 
3924 		if (insn->retpoline_safe)
3925 			continue;
3926 
3927 		if (insn->sec->init)
3928 			continue;
3929 
3930 		if (insn->type == INSN_RETURN) {
3931 			if (opts.rethunk) {
3932 				WARN_FUNC("'naked' return found in RETHUNK build",
3933 					  insn->sec, insn->offset);
3934 			} else
3935 				continue;
3936 		} else {
3937 			WARN_FUNC("indirect %s found in RETPOLINE build",
3938 				  insn->sec, insn->offset,
3939 				  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
3940 		}
3941 
3942 		warnings++;
3943 	}
3944 
3945 	return warnings;
3946 }
3947 
3948 static bool is_kasan_insn(struct instruction *insn)
3949 {
3950 	return (insn->type == INSN_CALL &&
3951 		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
3952 }
3953 
3954 static bool is_ubsan_insn(struct instruction *insn)
3955 {
3956 	return (insn->type == INSN_CALL &&
3957 		!strcmp(insn->call_dest->name,
3958 			"__ubsan_handle_builtin_unreachable"));
3959 }
3960 
/*
 * Decide whether an unreachable instruction should be excused instead of
 * triggering an "unreachable instruction" warning.  Covers ignored/NOP/trap
 * padding, alternative replacements, linker-dropped weak symbols, compiler
 * generated UD2s after noreturn calls, and KASAN/UBSAN artifacts.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
		return true;

	/*
	 * Ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 *
	 * In this case we'll find a piece of code (whole function) that is not
	 * covered by a !section symbol. Ignore them.
	 */
	if (opts.link && !insn_func(insn)) {
		/* negative size: hole runs until end of section */
		int size = find_symbol_hole_containing(insn->sec, insn->offset);
		unsigned long end = insn->offset + size;

		if (!size) /* not a hole */
			return false;

		if (size < 0) /* hole until the end */
			return true;

		sec_for_each_insn_continue(file, insn) {
			/*
			 * If we reach a visited instruction at or before the
			 * end of the hole, ignore the unreachable.
			 */
			if (insn->visited)
				return true;

			if (insn->offset >= end)
				break;

			/*
			 * If this hole jumps to a .cold function, mark it ignore too.
			 */
			if (insn->jump_dest && insn_func(insn->jump_dest) &&
			    strstr(insn_func(insn->jump_dest)->name, ".cold")) {
				struct instruction *dest = insn->jump_dest;
				func_for_each_insn(file, insn_func(dest), dest)
					dest->ignore = true;
			}
		}

		return false;
	}

	if (!insn_func(insn))
		return false;

	/* Inline-asm trampolines are never "reached" by normal flow. */
	if (insn_func(insn)->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Only follow jumps that stay within the function. */
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == insn_func(insn)) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Don't walk past the end of the function. */
		if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}
4070 
/*
 * If exactly opts.prefix bytes of padding NOPs immediately precede @func,
 * have the ELF layer create a prefix symbol covering them (see
 * elf_create_prefix_symbol()).  @insn is the first instruction of @func.
 */
static int add_prefix_symbol(struct objtool_file *file, struct symbol *func,
			     struct instruction *insn)
{
	if (!opts.prefix)
		return 0;

	/* Walk backwards over the NOPs preceding the function. */
	for (;;) {
		struct instruction *prev = list_prev_entry(insn, list);
		u64 offset;

		if (&prev->list == &file->insn_list)
			break;

		if (prev->type != INSN_NOP)
			break;

		offset = func->offset - prev->offset;
		if (offset >= opts.prefix) {
			if (offset == opts.prefix) {
				/*
				 * Since the sec->symbol_list is ordered by
				 * offset (see elf_add_symbol()) the added
				 * symbol will not be seen by the iteration in
				 * validate_section().
				 *
				 * Hence the lack of list_for_each_entry_safe()
				 * there.
				 *
				 * The direct consequence is that prefix symbols
				 * don't get visited (because pointless), except
				 * for the logic in ignore_unreachable_insn()
				 * that needs the terminating insn to be visited
				 * otherwise it will report the hole.
				 *
				 * Hence mark the first instruction of the
				 * prefix symbol as visited.
				 */
				prev->visited |= VISITED_BRANCH;
				elf_create_prefix_symbol(file->elf, func, opts.prefix);
			}
			break;
		}
		insn = prev;
	}

	return 0;
}
4118 
/*
 * Validate a single function symbol: check it has a size, skip aliases
 * and sub-functions (only the parent copy is validated), then run
 * validate_branch() from its first instruction.  Returns the warning
 * count from validate_branch(), or 1 for a missing size annotation.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* Only validate the canonical copy of a function, and only once. */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	add_prefix_symbol(file, sym, insn);

	/* uaccess-safe functions may be entered with AC already set. */
	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn_func(insn), insn, *state);
	if (ret && opts.backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}
4146 
4147 static int validate_section(struct objtool_file *file, struct section *sec)
4148 {
4149 	struct insn_state state;
4150 	struct symbol *func;
4151 	int warnings = 0;
4152 
4153 	list_for_each_entry(func, &sec->symbol_list, list) {
4154 		if (func->type != STT_FUNC)
4155 			continue;
4156 
4157 		init_insn_state(file, &state, sec);
4158 		set_func_state(&state.cfi);
4159 
4160 		warnings += validate_symbol(file, sec, func, &state);
4161 	}
4162 
4163 	return warnings;
4164 }
4165 
4166 static int validate_noinstr_sections(struct objtool_file *file)
4167 {
4168 	struct section *sec;
4169 	int warnings = 0;
4170 
4171 	sec = find_section_by_name(file->elf, ".noinstr.text");
4172 	if (sec) {
4173 		warnings += validate_section(file, sec);
4174 		warnings += validate_unwind_hints(file, sec);
4175 	}
4176 
4177 	sec = find_section_by_name(file->elf, ".entry.text");
4178 	if (sec) {
4179 		warnings += validate_section(file, sec);
4180 		warnings += validate_unwind_hints(file, sec);
4181 	}
4182 
4183 	sec = find_section_by_name(file->elf, ".cpuidle.text");
4184 	if (sec) {
4185 		warnings += validate_section(file, sec);
4186 		warnings += validate_unwind_hints(file, sec);
4187 	}
4188 
4189 	return warnings;
4190 }
4191 
4192 static int validate_functions(struct objtool_file *file)
4193 {
4194 	struct section *sec;
4195 	int warnings = 0;
4196 
4197 	for_each_sec(file, sec) {
4198 		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4199 			continue;
4200 
4201 		warnings += validate_section(file, sec);
4202 	}
4203 
4204 	return warnings;
4205 }
4206 
4207 static void mark_endbr_used(struct instruction *insn)
4208 {
4209 	if (!list_empty(&insn->call_node))
4210 		list_del_init(&insn->call_node);
4211 }
4212 
4213 static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4214 {
4215 	struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4216 	struct instruction *first;
4217 
4218 	if (!sym)
4219 		return false;
4220 
4221 	first = find_insn(file, sym->sec, sym->offset);
4222 	if (!first)
4223 		return false;
4224 
4225 	if (first->type != INSN_ENDBR && !first->noendbr)
4226 		return false;
4227 
4228 	return insn->offset == sym->offset + sym->len;
4229 }
4230 
/*
 * IBT: check every relocation inside @insn that could materialize a code
 * address (i.e. a function-pointer load).  Each target must be an ENDBR
 * instruction, be annotated noendbr, or fall into one of the accepted
 * special cases.  Returns the number of warnings issued.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {
	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;
	default:
		break;
	}

	/* Iterate over ALL relocations within this instruction's bytes. */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc->offset + 1,
					      (insn->offset + insn->len) - (reloc->offset + 1))) {

		/*
		 * static_call_update() references the trampoline, which
		 * doesn't have (or need) ENDBR.  Skip warning in that case.
		 */
		if (reloc->sym->static_call_tramp)
			continue;

		/* Compute the effective target address of the relocation. */
		off = reloc->sym->offset;
		if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
			off += arch_dest_reloc_offset(reloc->addend);
		else
			off += reloc->addend;

		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		if (dest->type == INSN_ENDBR) {
			mark_endbr_used(dest);
			continue;
		}

		if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
			/*
			 * Anything from->to self is either _THIS_IP_ or
			 * IRET-to-self.
			 *
			 * There is no sane way to annotate _THIS_IP_ since the
			 * compiler treats the relocation as a constant and is
			 * happy to fold in offsets, skewing any annotation we
			 * do, leading to vast amounts of false-positives.
			 *
			 * There's also compiler generated _THIS_IP_ through
			 * KCOV and such which we have no hope of annotating.
			 *
			 * As such, blanket accept self-references without
			 * issue.
			 */
			continue;
		}

		/*
		 * Accept anything ANNOTATE_NOENDBR.
		 */
		if (dest->noendbr)
			continue;

		/*
		 * Accept if this is the instruction after a symbol
		 * that is (no)endbr -- typical code-range usage.
		 */
		if (noendbr_range(file, dest))
			continue;

		WARN_FUNC("relocation to !ENDBR: %s",
			  insn->sec, insn->offset,
			  offstr(dest->sec, dest->offset));

		warnings++;
	}

	return warnings;
}
4325 
/*
 * IBT: a data relocation pointing into text is a potential indirect-branch
 * target.  It must land on an ENDBR (which is then marked used) or be
 * annotated noendbr; otherwise warn.  Returns 0 or 1 (warning count).
 */
static int validate_ibt_data_reloc(struct objtool_file *file,
				   struct reloc *reloc)
{
	struct instruction *dest;

	dest = find_insn(file, reloc->sym->sec,
			 reloc->sym->offset + reloc->addend);
	if (!dest)
		return 0;

	/* Checked before ->noendbr so a referenced ENDBR is never sealed. */
	if (dest->type == INSN_ENDBR) {
		mark_endbr_used(dest);
		return 0;
	}

	if (dest->noendbr)
		return 0;

	WARN_FUNC("data relocation to !ENDBR: %s",
		  reloc->sec->base, reloc->offset,
		  offstr(dest->sec, dest->offset));

	return 1;
}
4350 
/*
 * Validate IBT rules and remove used ENDBR instructions from the seal list.
 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
 * NOPs) later, in create_ibt_endbr_seal_sections().
 *
 * Pass 1 checks every instruction; pass 2 checks data relocations in
 * non-executable sections, skipping metadata sections known to reference
 * text without branching to it.
 */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	for_each_sec(file, sec) {

		/* Already done by validate_ibt_insn() */
		if (sec->sh.sh_flags & SHF_EXECINSTR)
			continue;

		if (!sec->reloc)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal"))	||
		    !strncmp(sec->name, ".debug", 6)			||
		    !strcmp(sec->name, ".altinstructions")		||
		    !strcmp(sec->name, ".ibt_endbr_seal")		||
		    !strcmp(sec->name, ".orc_unwind_ip")		||
		    !strcmp(sec->name, ".parainstructions")		||
		    !strcmp(sec->name, ".retpoline_sites")		||
		    !strcmp(sec->name, ".smp_locks")			||
		    !strcmp(sec->name, ".static_call_sites")		||
		    !strcmp(sec->name, "_error_injection_whitelist")	||
		    !strcmp(sec->name, "_kprobe_blacklist")		||
		    !strcmp(sec->name, "__bug_table")			||
		    !strcmp(sec->name, "__ex_table")			||
		    !strcmp(sec->name, "__jump_table")			||
		    !strcmp(sec->name, "__mcount_loc")			||
		    !strcmp(sec->name, ".kcfi_traps")			||
		    strstr(sec->name, "__patchable_function_entries"))
			continue;

		list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}
4405 
4406 static int validate_sls(struct objtool_file *file)
4407 {
4408 	struct instruction *insn, *next_insn;
4409 	int warnings = 0;
4410 
4411 	for_each_insn(file, insn) {
4412 		next_insn = next_insn_same_sec(file, insn);
4413 
4414 		if (insn->retpoline_safe)
4415 			continue;
4416 
4417 		switch (insn->type) {
4418 		case INSN_RETURN:
4419 			if (!next_insn || next_insn->type != INSN_TRAP) {
4420 				WARN_FUNC("missing int3 after ret",
4421 					  insn->sec, insn->offset);
4422 				warnings++;
4423 			}
4424 
4425 			break;
4426 		case INSN_JUMP_DYNAMIC:
4427 			if (!next_insn || next_insn->type != INSN_TRAP) {
4428 				WARN_FUNC("missing int3 after indirect jump",
4429 					  insn->sec, insn->offset);
4430 				warnings++;
4431 			}
4432 			break;
4433 		default:
4434 			break;
4435 		}
4436 	}
4437 
4438 	return warnings;
4439 }
4440 
4441 static int validate_reachable_instructions(struct objtool_file *file)
4442 {
4443 	struct instruction *insn;
4444 
4445 	if (file->ignore_unreachables)
4446 		return 0;
4447 
4448 	for_each_insn(file, insn) {
4449 		if (insn->visited || ignore_unreachable_insn(file, insn))
4450 			continue;
4451 
4452 		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
4453 		return 1;
4454 	}
4455 
4456 	return 0;
4457 }
4458 
4459 int check(struct objtool_file *file)
4460 {
4461 	int ret, warnings = 0;
4462 
4463 	arch_initial_func_cfi_state(&initial_func_cfi);
4464 	init_cfi_state(&init_cfi);
4465 	init_cfi_state(&func_cfi);
4466 	set_func_state(&func_cfi);
4467 
4468 	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
4469 		goto out;
4470 
4471 	cfi_hash_add(&init_cfi);
4472 	cfi_hash_add(&func_cfi);
4473 
4474 	ret = decode_sections(file);
4475 	if (ret < 0)
4476 		goto out;
4477 
4478 	warnings += ret;
4479 
4480 	if (list_empty(&file->insn_list))
4481 		goto out;
4482 
4483 	if (opts.retpoline) {
4484 		ret = validate_retpoline(file);
4485 		if (ret < 0)
4486 			return ret;
4487 		warnings += ret;
4488 	}
4489 
4490 	if (opts.stackval || opts.orc || opts.uaccess) {
4491 		ret = validate_functions(file);
4492 		if (ret < 0)
4493 			goto out;
4494 		warnings += ret;
4495 
4496 		ret = validate_unwind_hints(file, NULL);
4497 		if (ret < 0)
4498 			goto out;
4499 		warnings += ret;
4500 
4501 		if (!warnings) {
4502 			ret = validate_reachable_instructions(file);
4503 			if (ret < 0)
4504 				goto out;
4505 			warnings += ret;
4506 		}
4507 
4508 	} else if (opts.noinstr) {
4509 		ret = validate_noinstr_sections(file);
4510 		if (ret < 0)
4511 			goto out;
4512 		warnings += ret;
4513 	}
4514 
4515 	if (opts.unret) {
4516 		/*
4517 		 * Must be after validate_branch() and friends, it plays
4518 		 * further games with insn->visited.
4519 		 */
4520 		ret = validate_unret(file);
4521 		if (ret < 0)
4522 			return ret;
4523 		warnings += ret;
4524 	}
4525 
4526 	if (opts.ibt) {
4527 		ret = validate_ibt(file);
4528 		if (ret < 0)
4529 			goto out;
4530 		warnings += ret;
4531 	}
4532 
4533 	if (opts.sls) {
4534 		ret = validate_sls(file);
4535 		if (ret < 0)
4536 			goto out;
4537 		warnings += ret;
4538 	}
4539 
4540 	if (opts.static_call) {
4541 		ret = create_static_call_sections(file);
4542 		if (ret < 0)
4543 			goto out;
4544 		warnings += ret;
4545 	}
4546 
4547 	if (opts.retpoline) {
4548 		ret = create_retpoline_sites_sections(file);
4549 		if (ret < 0)
4550 			goto out;
4551 		warnings += ret;
4552 	}
4553 
4554 	if (opts.cfi) {
4555 		ret = create_cfi_sections(file);
4556 		if (ret < 0)
4557 			goto out;
4558 		warnings += ret;
4559 	}
4560 
4561 	if (opts.rethunk) {
4562 		ret = create_return_sites_sections(file);
4563 		if (ret < 0)
4564 			goto out;
4565 		warnings += ret;
4566 
4567 		if (opts.hack_skylake) {
4568 			ret = create_direct_call_sections(file);
4569 			if (ret < 0)
4570 				goto out;
4571 			warnings += ret;
4572 		}
4573 	}
4574 
4575 	if (opts.mcount) {
4576 		ret = create_mcount_loc_sections(file);
4577 		if (ret < 0)
4578 			goto out;
4579 		warnings += ret;
4580 	}
4581 
4582 	if (opts.ibt) {
4583 		ret = create_ibt_endbr_seal_sections(file);
4584 		if (ret < 0)
4585 			goto out;
4586 		warnings += ret;
4587 	}
4588 
4589 	if (opts.orc && !list_empty(&file->insn_list)) {
4590 		ret = orc_create(file);
4591 		if (ret < 0)
4592 			goto out;
4593 		warnings += ret;
4594 	}
4595 
4596 
4597 	if (opts.stats) {
4598 		printf("nr_insns_visited: %ld\n", nr_insns_visited);
4599 		printf("nr_cfi: %ld\n", nr_cfi);
4600 		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
4601 		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
4602 	}
4603 
4604 out:
4605 	/*
4606 	 *  For now, don't fail the kernel build on fatal warnings.  These
4607 	 *  errors are still fairly common due to the growing matrix of
4608 	 *  supported toolchains and their recent pace of change.
4609 	 */
4610 	return 0;
4611 }
4612