xref: /openbmc/linux/arch/x86/kernel/jump_label.c (revision e7253313)
// SPDX-License-Identifier: GPL-2.0
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>

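/*
 * A jump label site is JUMP_LABEL_NOP_SIZE (5) bytes wide: either a 5-byte
 * atomic NOP or a near JMP rel32 (opcode 0xe9 followed by a 32-bit
 * displacement). The packed struct below overlays that JMP encoding on the
 * raw byte view; packing keeps the int immediately after the opcode byte.
 */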
union jump_code_union {
	char code[JUMP_LABEL_NOP_SIZE];
	struct {
		char jump;
		int offset;
	} __attribute__((packed));
};

static void bug_at(unsigned char *ip, int line)
{
	/*
	 * The location is not an op that we were expecting.
	 * Something went wrong. Crash the box, as something could be
	 * corrupting the kernel.
	 */
	pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
	BUG();
}

static void __jump_label_set_jump_code(struct jump_entry *entry,
				       enum jump_label_type type,
				       union jump_code_union *code,
				       int init)
{
	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
	const void *expect;
	int line;

	code->jump = 0xe9;
	code->offset = jump_entry_target(entry) -
		       (jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
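	/*
	 * Illustrative example (hypothetical addresses): for a site at
	 * 0xffffffff81000000 jumping to 0xffffffff81000040, the offset is
	 * relative to the end of the 5-byte instruction, i.e.
	 * 0x40 - 0x5 = 0x3b, giving the encoding e9 3b 00 00 00.
	 */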

	if (init) {
		expect = default_nop; line = __LINE__;
	} else if (type == JUMP_LABEL_JMP) {
		expect = ideal_nop; line = __LINE__;
	} else {
		expect = code->code; line = __LINE__;
	}

	if (memcmp((void *)jump_entry_code(entry), expect, JUMP_LABEL_NOP_SIZE))
		bug_at((void *)jump_entry_code(entry), line);

	if (type == JUMP_LABEL_NOP)
		memcpy(code, ideal_nop, JUMP_LABEL_NOP_SIZE);
}

static void __ref __jump_label_transform(struct jump_entry *entry,
					 enum jump_label_type type,
					 int init)
{
	union jump_code_union code;

	__jump_label_set_jump_code(entry, type, &code, init);

	/*
	 * As long as only a single processor is running and the code is still
	 * not marked as RO, text_poke_early() can be used; checking that
	 * system_state is SYSTEM_BOOTING guarantees it. It will be set to
	 * SYSTEM_SCHEDULING before other CPUs are woken up and before the
	 * code is write-protected.
	 *
	 * While the change is being made, it does not matter whether we
	 * are doing a nop -> jump or a jump -> nop transition; always
	 * assume the nop is the 'currently valid' instruction.
	 */
	if (init || system_state == SYSTEM_BOOTING) {
		text_poke_early((void *)jump_entry_code(entry), &code,
				JUMP_LABEL_NOP_SIZE);
		return;
	}

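	/*
	 * Patching live code: text_poke_bp() uses the INT3 breakpoint
	 * protocol (roughly: install INT3 on the first byte, sync cores,
	 * write the instruction tail, sync, restore the first byte, sync),
	 * so no CPU can ever execute a half-written instruction.
	 */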
	text_poke_bp((void *)jump_entry_code(entry), &code, JUMP_LABEL_NOP_SIZE, NULL);
}

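/*
 * Single-entry transform, called from the generic jump_label code.
 * text_mutex serializes us against the other text-patching users,
 * such as kprobes.
 */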
void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, 0);
	mutex_unlock(&text_mutex);
}

#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;
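
/*
 * Batching rationale: every text_poke_bp() cycle broadcasts sync-core IPIs
 * to all CPUs. Queueing up to TP_VEC_MAX sites and flushing them with a
 * single text_poke_bp_batch() call amortizes that cost when many static
 * keys are toggled at once.
 */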
bool arch_jump_label_transform_queue(struct jump_entry *entry,
				     enum jump_label_type type)
{
	struct text_poke_loc *tp;
	void *entry_code;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * Fall back to the non-batching mode.
		 */
		arch_jump_label_transform(entry, type);
		return true;
	}

	/*
	 * No more space in the vector; tell the upper layer to apply
	 * the queue before continuing.
	 */
	if (tp_vec_nr == TP_VEC_MAX)
		return false;

	tp = &tp_vec[tp_vec_nr];

	entry_code = (void *)jump_entry_code(entry);

	/*
	 * The INT3 handler will do a bsearch in the queue, so we need entries
	 * to be sorted. We can survive an unsorted list by rejecting the
	 * entry, forcing the generic jump_label code to apply the queue
	 * first. Warn once to draw attention to the unsorted case, which had
	 * better not happen: in the worst case we merely perform the same
	 * way as we do without batching, with some extra overhead.
	 */
	if (tp_vec_nr > 0) {
		int prev = tp_vec_nr - 1;
		struct text_poke_loc *prev_tp = &tp_vec[prev];

		if (WARN_ON_ONCE(prev_tp->addr > entry_code))
			return false;
	}

	__jump_label_set_jump_code(entry, type,
				   (union jump_code_union *)&tp->text, 0);

	text_poke_loc_init(tp, entry_code, NULL, JUMP_LABEL_NOP_SIZE, NULL);

	tp_vec_nr++;

	return true;
}

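/*
 * Expected usage from the generic layer: call
 * arch_jump_label_transform_queue() for each entry until it returns false
 * (vector full, or unsorted input), then call
 * arch_jump_label_transform_apply() to flush the queue and retry.
 */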
void arch_jump_label_transform_apply(void)
{
	if (!tp_vec_nr)
		return;

	mutex_lock(&text_mutex);
	text_poke_bp_batch(tp_vec, tp_vec_nr);
	mutex_unlock(&text_mutex);

	tp_vec_nr = 0;
}

static enum {
	JL_STATE_START,
	JL_STATE_NO_UPDATE,
	JL_STATE_UPDATE,
} jlstate __initdata_or_module = JL_STATE_START;

__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
				      enum jump_label_type type)
{
	/*
	 * This function is called at boot up and when modules are
	 * first loaded. Check if the default nop, the one that is
	 * inserted at compile time, is the ideal nop. If it is, then
	 * we do not need to update the nop, and we can leave it as is.
	 * If it is not, then we need to update the nop to the ideal nop.
	 */
	if (jlstate == JL_STATE_START) {
		const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
		const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
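
		/*
		 * Illustrative (the bytes vary by CPU and kernel config):
		 * on many CPUs the ideal 5-byte NOP is the P6
		 * "nopl 0x0(%rax,%rax,1)" (0f 1f 44 00 00). If the nop
		 * compiled in via STATIC_KEY_INIT_NOP differs from it,
		 * every site must be rewritten; if they already match,
		 * all sites can be left untouched.
		 */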
		if (memcmp(ideal_nop, default_nop, 5) != 0)
			jlstate = JL_STATE_UPDATE;
		else
			jlstate = JL_STATE_NO_UPDATE;
	}
	if (jlstate == JL_STATE_UPDATE)
		__jump_label_transform(entry, type, 1);
}
198