// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * Mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (e.g. to allocate memory) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);

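/*
 * Illustrative sketch only (not code used by this file): a text-patching
 * path is expected to hold text_mutex across the whole patch sequence,
 * and patching helpers may assert that.  Roughly:
 *
 *	mutex_lock(&text_mutex);
 *	lockdep_assert_held(&text_mutex);	(inside the patching helper)
 *	... patch the instruction(s), sync/flush the icache ...
 *	mutex_unlock(&text_mutex);
 */
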
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build-time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed &&
	    &__stop___ex_table > &__start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}

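/*
 * Why the sort matters (illustrative sketch, not the exact lib/extable.c
 * code): with the entries ordered by their recorded instruction address,
 * the fault-time lookup can binary-search instead of scanning linearly.
 * Conceptually, with insn_addr() standing in for the real accessor:
 *
 *	while (low <= high) {
 *		mid = (low + high) / 2;
 *		if (addr < insn_addr(&table[mid]))
 *			high = mid - 1;
 *		else if (addr > insn_addr(&table[mid]))
 *			low = mid + 1;
 *		else
 *			return &table[mid];
 *	}
 */
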
/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	if (!e)
		e = search_bpf_extables(addr);
	return e;
}

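/*
 * Illustrative sketch only: the typical caller is an architecture's page
 * fault handler, which asks whether the faulting instruction has a
 * registered fixup and, if so, resumes execution there instead of oopsing.
 * Schematically (how regs are redirected to the fixup is arch-specific):
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_exception_tables(instruction_pointer(regs));
 *	if (fixup) {
 *		... point regs at the fixup code and return ...
 *	}
 *	... otherwise it is a genuine kernel fault ...
 */
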
int notrace core_kernel_text(unsigned long addr)
{
	if (is_kernel_text(addr))
		return 1;

	if (system_state < SYSTEM_FREEING_INITMEM &&
	    is_kernel_inittext(addr))
		return 1;
	return 0;
}

int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (is_kernel_inittext(addr))
		return 1;
	return 0;
}

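/*
 * Illustrative sketch only: stack unwinders and backtrace printers use
 * __kernel_text_address() to decide whether a word found on the stack can
 * plausibly be a return address before symbolizing it, e.g.:
 *
 *	if (__kernel_text_address(addr))
 *		pr_info("%pS\n", (void *)addr);
 */
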
int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be notified so that it starts watching again.
	 * This can happen when tracing triggers a stack trace, or
	 * when a WARN() fires while coming back from idle, or during
	 * CPU onlining or offlining.
	 *
	 * is_module_text_address() as well as the kprobe slot lookups,
	 * is_bpf_text_address() and is_bpf_image_address() require
	 * RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		rcu_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		rcu_nmi_exit();

	return ret;
}

/*
 * On some architectures (PPC64, IA64) function pointers are really
 * only tokens (function descriptors) pointing to data that holds the
 * real function address. As a result, to find out whether a function
 * pointer is part of the kernel text, we need to do some special
 * dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;

	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
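/*
 * Illustrative sketch only: code handed a callback pointer from elsewhere
 * can use this as a sanity check before trusting it (cb below is a
 * hypothetical callback pointer):
 *
 *	if (!func_ptr_is_kernel_text(cb))
 *		pr_warn("suspicious callback %ps\n", cb);
 */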