/* arch/x86/kernel/alternative.c (revision 4da722ca) */
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
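
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): how the two debug macros above fit together when tracing a
 * patch site. The buffer contents are made up for the example; the
 * output only appears when booted with "debug-alternative".
 */
#if 0
static void example_debug_trace(void)
{
	u8 buf[4] = { 0x0f, 0x1f, 0x40, 0x00 };	/* a 4-byte P6 NOP */

	DPRINTK("patching %d bytes", 4);
	DUMP_BYTES(buf, 4, "%p: example bytes: ", buf);
}
#endif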

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
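
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): ideal_nops is indexed by NOP length, so ideal_nops[n] points
 * at an n-byte NOP (1 <= n <= ASM_NOP_MAX), and the extra slot at
 * [ASM_NOP_MAX + 1] is the 5-byte atomic NOP mentioned in the comment
 * above the tables.
 */
#if 0
static void example_emit_nop3(u8 *site)
{
	/* copy a single 3-byte ideal NOP to a patch site */
	memcpy(site, ideal_nops[3], 3);
}
#endif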

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
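
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): padding the tail of a patch buffer with ideal NOPs, exactly
 * as apply_alternatives() does below when the replacement is shorter
 * than the original instruction.
 */
#if 0
static void example_pad_tail(u8 *insnbuf, int replacementlen, int instrlen)
{
	if (instrlen > replacementlen)
		add_nops(insnbuf + replacementlen, instrlen - replacementlen);
}
#endif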

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
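
/*
 * Encoding reminder (editorial note, not in the original file):
 *	0xeb disp8	-- JMP rel8,  2 bytes total
 *	0xe9 disp32	-- JMP rel32, 5 bytes total
 * Both displacements are relative to the address of the *next*
 * instruction, which is why recompute_jump() below works with next_rip.
 */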

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
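
/*
 * Worked example (editorial addition, hypothetical addresses): suppose
 * the original site is at 0x1000 and the 5-byte replacement JMP sits at
 * 0x2000 targeting 0x2040. Then next_rip = 0x2005, o_dspl = 0x3b,
 * tgt_rip = 0x2005 + 0x3b = 0x2040, and n_dspl = 0x2040 - 0x1000 =
 * 0x1040. Since 0x1040 - 2 does not fit in a signed byte, the
 * five_byte_jmp path emits "e9 3b 10 00 00" (0x1040 - 5 = 0x103b) at
 * the original site.
 */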

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;

	if (instr[0] != 0x90)
		return;

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
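
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): entries in the table walked above are emitted by the
 * alternative() macros from <asm/alternative.h>. A classic user is the
 * 32-bit memory barrier, which replaces a LOCK-ed no-op with MFENCE on
 * CPUs that have SSE2:
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * The macro records instr_offset/repl_offset/cpuid/instrlen/
 * replacementlen in the .altinstructions section, which is the
 * struct alt_instr layout that the loop above consumes.
 */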

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}
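
/*
 * Illustrative sketch (editorial addition, adapted from
 * <asm/alternative.h>, shown here for context): the s32 entries walked
 * above come from the LOCK_PREFIX macro, which records the address of
 * every lock prefix it emits in the .smp_locks section as a relative
 * offset:
 *
 *	".pushsection .smp_locks,\"a\"\n"
 *	".balign 4\n"
 *	".long 671f - .\n"
 *	".popsection\n"
 *	"671:\n\tlock; "
 *
 * That is why "(u8 *)poff + *poff" above recovers the prefix address.
 */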

struct smp_alt_module {
	/* the module owning this code region (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&smp_alt);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&smp_alt);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also, machine checks tend to be broadcast: if one CPU goes
	 * into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during
	 * code patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
					      size_t len)
{
	unsigned long flags;
	local_irq_save(flags);
	memcpy(addr, opcode, len);
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
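
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): a minimal caller, e.g. turning a DS prefix into a LOCK prefix
 * as alternatives_smp_lock() does above. text_mutex must already be
 * held, per the kerneldoc note.
 */
#if 0
static void example_poke_one_byte(u8 *ptr)
{
	static const unsigned char lock = 0xf0;

	lockdep_assert_held(&text_mutex);
	text_poke(ptr, &lock, 1);
}
#endif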

static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/* bp_patching_in_progress */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify a multi-byte instruction by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using the int3 breakpoint instead.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) with the first byte of the
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in the int3 notifier, for making
	 * sure the in_progress flag is correctly ordered wrt. patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	bp_patching_in_progress = false;
	smp_wmb();

	return addr;
}
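
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file; simplified from the way arch/x86/kernel/jump_label.c uses this
 * API): replace a 5-byte NOP with a 5-byte relative JMP. While the int3
 * is in place, a CPU hitting the site is redirected to the instruction
 * after the patched range.
 */
#if 0
static void example_patch_jmp(void *site, s32 disp)
{
	unsigned char jmp[5];

	jmp[0] = 0xe9;			/* JMP rel32 */
	memcpy(&jmp[1], &disp, 4);	/* disp is relative to site + 5 */

	mutex_lock(&text_mutex);
	text_poke_bp(site, jmp, sizeof(jmp), (u8 *)site + sizeof(jmp));
	mutex_unlock(&text_mutex);
}
#endif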