xref: /openbmc/linux/kernel/jump_label.c (revision d78c317f)
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstra@redhat.com>
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/jump_label.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

bool jump_label_enabled(struct jump_label_key *key)
{
	return !!atomic_read(&key->enabled);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}
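/*
 * Sorting by key makes all entries that share a key contiguous, so a
 * key need only record a pointer to its first entry and the update
 * loops below can stop as soon as the key changes.
 */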

static void jump_label_update(struct jump_label_key *key, int enable);

void jump_label_inc(struct jump_label_key *key)
{
	/* fast path: already enabled, just bump the reference count */
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	/* 0 -> 1 transition: patch the jump sites before going live */
	if (atomic_read(&key->enabled) == 0)
		jump_label_update(key, JUMP_LABEL_ENABLE);
	atomic_inc(&key->enabled);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(jump_label_inc);

static void __jump_label_dec(struct jump_label_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
		return;

	if (rate_limit) {
		/* keep the key enabled and retry the disable after rate_limit */
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else
		jump_label_update(key, JUMP_LABEL_DISABLE);

	jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct jump_label_key_deferred *key =
		container_of(work, struct jump_label_key_deferred, work.work);
	__jump_label_dec(&key->key, 0, NULL);
}

void jump_label_dec(struct jump_label_key *key)
{
	__jump_label_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(jump_label_dec);

void jump_label_dec_deferred(struct jump_label_key_deferred *key)
{
	__jump_label_dec(&key->key, key->timeout, &key->work);
}

void jump_label_rate_limit(struct jump_label_key_deferred *key,
		unsigned long rl)
{
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
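/*
 * Illustrative usage sketch (my_feature_key and the call sites are
 * hypothetical, modelled on tracepoint-style users; not part of this
 * file):
 *
 *	static struct jump_label_key my_feature_key;
 *
 *	void my_feature_enable(void)  { jump_label_inc(&my_feature_key); }
 *	void my_feature_disable(void) { jump_label_dec(&my_feature_key); }
 *
 *	// Fast path: compiles to a single NOP until the key is enabled.
 *	if (static_branch(&my_feature_key))
 *		do_my_feature_slow_path();
 */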

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}
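/*
 * Worked example (assuming JUMP_LABEL_NOP_SIZE == 5): an entry with
 * code == 0x1000 occupies [0x1000, 0x1004], so it conflicts with any
 * inclusive range [start, end] where start <= 0x1004 and end >= 0x1000,
 * e.g. [0x0fff, 0x1000] or [0x1004, 0x1010], but not [0x1005, 0x1010].
 */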

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}
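/*
 * Sketch of a possible override (hypothetical; arch_write_ideal_nop()
 * is an invented helper): an arch whose live-update path goes through
 * stop_machine() could patch not-yet-executing code with a plain
 * memory write, e.g.:
 *
 *	void __init_or_module
 *	arch_jump_label_transform_static(struct jump_entry *entry,
 *					 enum jump_label_type type)
 *	{
 *		if (type == JUMP_LABEL_DISABLE)
 *			arch_write_ideal_nop((void *)entry->code);
 *		else
 *			arch_jump_label_transform(entry, type);
 *	}
 */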

static void __jump_label_update(struct jump_label_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop, int enable)
{
	for (; (entry < stop) &&
	      (entry->key == (jump_label_t)(unsigned long)key);
	      entry++) {
		/*
		 * An entry->code of 0 marks a module init section that has
		 * been invalidated (see jump_label_invalidate_module_init());
		 * kernel_text_address() additionally verifies we are not in
		 * core kernel init code.
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, enable);
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_label_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct jump_label_key *iterk;

		iterk = (struct jump_label_key *)(unsigned long)iter->key;
		arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
						 JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
		if (iterk == key)
			continue;

		key = iterk;
		key->entries = iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	jump_label_unlock();
}

#ifdef CONFIG_MODULES

struct jump_label_mod {
	struct jump_label_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	mod = __module_text_address((unsigned long)start);
	if (!mod)
		return 0;

	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct jump_label_key *key, int enable)
{
	struct jump_label_mod *mod = key->next;

	while (mod) {
		struct module *m = mod->mod;

		__jump_label_update(key, mod->entries,
				    m->jump_entries + m->num_jump_entries,
				    enable);
		mod = mod->next;
	}
}

/**
 * jump_label_apply_nops - patch module jump labels with the ideal nop
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * goes live, patch its jump label entries via
 * arch_jump_label_transform_static(), which is provided by the
 * arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct jump_label_key *iterk;

		iterk = (struct jump_label_key *)(unsigned long)iter->key;
		arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
				JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
	}
}
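/*
 * On architectures with jump label support this is typically called
 * from the arch module-loading code (e.g. x86's module_finalize())
 * while the module is not yet live, which is why the cheaper
 * arch_jump_label_transform_static() path is safe here.
 */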

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct jump_label_key *key = NULL;
	struct jump_label_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct jump_label_key *)(unsigned long)iter->key;

		/* the key lives in this module: it owns these entries */
		if (__module_address(iter->key) == mod) {
			atomic_set(&key->enabled, 0);
			key->entries = iter;
			key->next = NULL;
			continue;
		}

		/*
		 * The key lives in the core kernel or another module;
		 * track this module's entries with a jump_label_mod link.
		 */
		jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;

		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		if (jump_label_enabled(key))
			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct jump_label_key *key = NULL;
	struct jump_label_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct jump_label_key *)(unsigned long)iter->key;

		/* keys owned by this module go away with it */
		if (__module_address(iter->key) == mod)
			continue;

		/* unlink and free this module's jump_label_mod, if any */
		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/**
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text address range between @start and @end overlaps
 * with any of the jump label patch addresses. Code that wants to
 * modify kernel text should first verify that it does not overlap
 * with any of the jump label addresses. Caller must hold
 * jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
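/*
 * Illustrative caller sketch (my_patch_allowed() is hypothetical;
 * kprobes performs an equivalent check): refuse to patch text that a
 * jump label owns.
 *
 *	static int my_patch_allowed(void *addr, size_t len)
 *	{
 *		int ret;
 *
 *		jump_label_lock();
 *		ret = jump_label_text_reserved(addr, addr + len - 1) ?
 *			-EBUSY : 0;
 *		jump_label_unlock();
 *		return ret;
 *	}
 */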

static void jump_label_update(struct jump_label_key *key, int enable)
{
	struct jump_entry *entry = key->entries, *stop = __stop___jump_table;

#ifdef CONFIG_MODULES
	struct module *mod = __module_address((unsigned long)key);

	__jump_label_mod_update(key, enable);

	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, enable);
}

#endif /* HAVE_JUMP_LABEL */
437