xref: /openbmc/linux/arch/x86/kernel/jump_label.c (revision 75020f2d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * jump label x86 support
4  *
5  * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
6  *
7  */
8 #include <linux/jump_label.h>
9 #include <linux/memory.h>
10 #include <linux/uaccess.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/jhash.h>
14 #include <linux/cpu.h>
15 #include <asm/kprobes.h>
16 #include <asm/alternative.h>
17 #include <asm/text-patching.h>
18 
static void bug_at(const void *addr, int line)
{
	/*
	 * The bytes found at this patch site do not match what the
	 * jump-label code expected to see there. Since the kernel text
	 * may already be corrupted, the only safe response is to crash
	 * the box right away rather than keep running.
	 */
	pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", addr, addr, addr, line);
	BUG();
}
29 
/*
 * Compute the instruction bytes to install at @entry for transition
 * @type, after verifying that the bytes currently present at the site
 * are what we expect: the 5-byte NOP when enabling, the JMP when
 * disabling. On any mismatch the kernel crashes via bug_at(), which
 * reports the __LINE__ at which the expectation was recorded.
 */
static const void *
__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type)
{
	const void *expect, *code;
	const void *addr, *dest;
	int line;

	addr = (void *)jump_entry_code(entry);		/* patch site */
	dest = (void *)jump_entry_target(entry);	/* jump target */

	/* The 5-byte near JMP from the site to the target. */
	code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);

	if (type == JUMP_LABEL_JMP) {
		/* Enabling: the site must currently hold the 5-byte NOP. */
		expect = x86_nops[5]; line = __LINE__;
	} else {
		/* Disabling: the site must currently hold the JMP. */
		expect = code; line = __LINE__;
	}

	/* Never patch over bytes we do not recognize. */
	if (memcmp(addr, expect, JUMP_LABEL_NOP_SIZE))
		bug_at(addr, line);

	/* For a NOP transition the replacement is the NOP itself. */
	if (type == JUMP_LABEL_NOP)
		code = x86_nops[5];

	return code;
}
56 
57 static inline void __jump_label_transform(struct jump_entry *entry,
58 					  enum jump_label_type type,
59 					  int init)
60 {
61 	const void *opcode = __jump_label_set_jump_code(entry, type);
62 
63 	/*
64 	 * As long as only a single processor is running and the code is still
65 	 * not marked as RO, text_poke_early() can be used; Checking that
66 	 * system_state is SYSTEM_BOOTING guarantees it. It will be set to
67 	 * SYSTEM_SCHEDULING before other cores are awaken and before the
68 	 * code is write-protected.
69 	 *
70 	 * At the time the change is being done, just ignore whether we
71 	 * are doing nop -> jump or jump -> nop transition, and assume
72 	 * always nop being the 'currently valid' instruction
73 	 */
74 	if (init || system_state == SYSTEM_BOOTING) {
75 		text_poke_early((void *)jump_entry_code(entry), opcode,
76 				JUMP_LABEL_NOP_SIZE);
77 		return;
78 	}
79 
80 	text_poke_bp((void *)jump_entry_code(entry), opcode, JUMP_LABEL_NOP_SIZE, NULL);
81 }
82 
/*
 * Serialized wrapper around __jump_label_transform(): all kernel text
 * patching must be performed under text_mutex.
 */
static void __ref jump_label_transform(struct jump_entry *entry,
				       enum jump_label_type type,
				       int init)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, init);
	mutex_unlock(&text_mutex);
}
91 
/*
 * Generic jump-label hook: patch a single site at runtime (init=0,
 * i.e. the site is live and other CPUs may be executing it).
 */
void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	jump_label_transform(entry, type, 0);
}
97 
98 bool arch_jump_label_transform_queue(struct jump_entry *entry,
99 				     enum jump_label_type type)
100 {
101 	const void *opcode;
102 
103 	if (system_state == SYSTEM_BOOTING) {
104 		/*
105 		 * Fallback to the non-batching mode.
106 		 */
107 		arch_jump_label_transform(entry, type);
108 		return true;
109 	}
110 
111 	mutex_lock(&text_mutex);
112 	opcode = __jump_label_set_jump_code(entry, type);
113 	text_poke_queue((void *)jump_entry_code(entry),
114 			opcode, JUMP_LABEL_NOP_SIZE, NULL);
115 	mutex_unlock(&text_mutex);
116 	return true;
117 }
118 
/*
 * Flush all sites queued by arch_jump_label_transform_queue(), under
 * the same text_mutex that protects every other text-patching path.
 */
void arch_jump_label_transform_apply(void)
{
	mutex_lock(&text_mutex);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}
125 
/*
 * Early-boot patching state machine.
 * NOTE(review): jlstate is file-static and, in this revision, no code
 * in this file ever assigns it after its initializer, so it can never
 * leave JL_STATE_START — likely a leftover from the removed
 * dynamic-NOP-selection logic; confirm before relying on it.
 */
static enum {
	JL_STATE_START,
	JL_STATE_NO_UPDATE,
	JL_STATE_UPDATE,
} jlstate __initdata_or_module = JL_STATE_START;
131 
/*
 * Early patching hook, called at boot and on module load (init=1:
 * the code being patched is not yet live).
 * NOTE(review): jlstate is static to this file and is never advanced
 * to JL_STATE_UPDATE anywhere in it, so this function appears to be a
 * no-op in this revision — verify against the jump-label core before
 * assuming any patching happens on this path.
 */
__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
				      enum jump_label_type type)
{
	if (jlstate == JL_STATE_UPDATE)
		jump_label_transform(entry, type, 1);
}
138