1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "SMP alternatives: " fmt
3 
4 #include <linux/module.h>
5 #include <linux/sched.h>
6 #include <linux/mutex.h>
7 #include <linux/list.h>
8 #include <linux/stringify.h>
9 #include <linux/mm.h>
10 #include <linux/vmalloc.h>
11 #include <linux/memory.h>
12 #include <linux/stop_machine.h>
13 #include <linux/slab.h>
14 #include <linux/kdebug.h>
15 #include <linux/kprobes.h>
16 #include <linux/mmu_context.h>
17 #include <linux/bsearch.h>
18 #include <asm/text-patching.h>
19 #include <asm/alternative.h>
20 #include <asm/sections.h>
21 #include <asm/mce.h>
22 #include <asm/nmi.h>
23 #include <asm/cacheflush.h>
24 #include <asm/tlbflush.h>
25 #include <asm/insn.h>
26 #include <asm/io.h>
27 #include <asm/fixmap.h>
28 
29 int __read_mostly alternatives_patched;
30 
31 EXPORT_SYMBOL_GPL(alternatives_patched);
32 
33 #define MAX_PATCH_LEN (255-1)
34 
35 static int __initdata_or_module debug_alternative;
36 
37 static int __init debug_alt(char *str)
38 {
39 	debug_alternative = 1;
40 	return 1;
41 }
42 __setup("debug-alternative", debug_alt);
43 
44 static int noreplace_smp;
45 
46 static int __init setup_noreplace_smp(char *str)
47 {
48 	noreplace_smp = 1;
49 	return 1;
50 }
51 __setup("noreplace-smp", setup_noreplace_smp);
52 
53 #define DPRINTK(fmt, args...)						\
54 do {									\
55 	if (debug_alternative)						\
56 		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
57 } while (0)
58 
59 #define DUMP_BYTES(buf, len, fmt, args...)				\
60 do {									\
61 	if (unlikely(debug_alternative)) {				\
62 		int j;							\
63 									\
64 		if (!(len))						\
65 			break;						\
66 									\
67 		printk(KERN_DEBUG fmt, ##args);				\
68 		for (j = 0; j < (len) - 1; j++)				\
69 			printk(KERN_CONT "%02hhx ", buf[j]);		\
70 		printk(KERN_CONT "%02hhx\n", buf[j]);			\
71 	}								\
72 } while (0)
73 
74 /*
75  * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
76  * that encode that nop.  To get from one nop to the next, add to the
77  * array base an offset equal to the sum of the sizes of all the nops
78  * preceding the one we are after.
79  *
80  * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
81  * nice symmetry of sizes of the previous nops.
82  */
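/*
 * Worked example of the indexing above (illustrative; sizes follow the
 * GENERIC_NOPn definitions in <asm/nops.h>): intelnops[] is one flat byte
 * array { NOP1, NOP2, NOP3, ... }, so the 3-byte nop starts at
 * intelnops + 1 + 2 and intel_nops[3] points at it.  In general,
 * ideal_nops[n] yields a pointer to an n-byte nop encoding; index 0 is
 * unused and the last index (NOP_ATOMIC5) is the 5-byte atomic nop.
 */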
83 #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
84 static const unsigned char intelnops[] =
85 {
86 	GENERIC_NOP1,
87 	GENERIC_NOP2,
88 	GENERIC_NOP3,
89 	GENERIC_NOP4,
90 	GENERIC_NOP5,
91 	GENERIC_NOP6,
92 	GENERIC_NOP7,
93 	GENERIC_NOP8,
94 	GENERIC_NOP5_ATOMIC
95 };
96 static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
97 {
98 	NULL,
99 	intelnops,
100 	intelnops + 1,
101 	intelnops + 1 + 2,
102 	intelnops + 1 + 2 + 3,
103 	intelnops + 1 + 2 + 3 + 4,
104 	intelnops + 1 + 2 + 3 + 4 + 5,
105 	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
106 	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
107 	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
108 };
109 #endif
110 
111 #ifdef K8_NOP1
112 static const unsigned char k8nops[] =
113 {
114 	K8_NOP1,
115 	K8_NOP2,
116 	K8_NOP3,
117 	K8_NOP4,
118 	K8_NOP5,
119 	K8_NOP6,
120 	K8_NOP7,
121 	K8_NOP8,
122 	K8_NOP5_ATOMIC
123 };
124 static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
125 {
126 	NULL,
127 	k8nops,
128 	k8nops + 1,
129 	k8nops + 1 + 2,
130 	k8nops + 1 + 2 + 3,
131 	k8nops + 1 + 2 + 3 + 4,
132 	k8nops + 1 + 2 + 3 + 4 + 5,
133 	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
134 	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
135 	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
136 };
137 #endif
138 
139 #if defined(K7_NOP1) && !defined(CONFIG_X86_64)
140 static const unsigned char k7nops[] =
141 {
142 	K7_NOP1,
143 	K7_NOP2,
144 	K7_NOP3,
145 	K7_NOP4,
146 	K7_NOP5,
147 	K7_NOP6,
148 	K7_NOP7,
149 	K7_NOP8,
150 	K7_NOP5_ATOMIC
151 };
152 static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
153 {
154 	NULL,
155 	k7nops,
156 	k7nops + 1,
157 	k7nops + 1 + 2,
158 	k7nops + 1 + 2 + 3,
159 	k7nops + 1 + 2 + 3 + 4,
160 	k7nops + 1 + 2 + 3 + 4 + 5,
161 	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
162 	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
163 	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
164 };
165 #endif
166 
167 #ifdef P6_NOP1
168 static const unsigned char p6nops[] =
169 {
170 	P6_NOP1,
171 	P6_NOP2,
172 	P6_NOP3,
173 	P6_NOP4,
174 	P6_NOP5,
175 	P6_NOP6,
176 	P6_NOP7,
177 	P6_NOP8,
178 	P6_NOP5_ATOMIC
179 };
180 static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
181 {
182 	NULL,
183 	p6nops,
184 	p6nops + 1,
185 	p6nops + 1 + 2,
186 	p6nops + 1 + 2 + 3,
187 	p6nops + 1 + 2 + 3 + 4,
188 	p6nops + 1 + 2 + 3 + 4 + 5,
189 	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
190 	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
191 	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
192 };
193 #endif
194 
195 /* Initialize these to a safe default */
196 #ifdef CONFIG_X86_64
197 const unsigned char * const *ideal_nops = p6_nops;
198 #else
199 const unsigned char * const *ideal_nops = intel_nops;
200 #endif
201 
202 void __init arch_init_ideal_nops(void)
203 {
204 	switch (boot_cpu_data.x86_vendor) {
205 	case X86_VENDOR_INTEL:
206 		/*
207 		 * Due to a decoder implementation quirk, some
208 		 * specific Intel CPUs actually perform better with
209 		 * the "k8_nops" than with the SDM-recommended NOPs.
210 		 */
211 		if (boot_cpu_data.x86 == 6 &&
212 		    boot_cpu_data.x86_model >= 0x0f &&
213 		    boot_cpu_data.x86_model != 0x1c &&
214 		    boot_cpu_data.x86_model != 0x26 &&
215 		    boot_cpu_data.x86_model != 0x27 &&
216 		    boot_cpu_data.x86_model < 0x30) {
217 			ideal_nops = k8_nops;
218 		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
219 			   ideal_nops = p6_nops;
220 		} else {
221 #ifdef CONFIG_X86_64
222 			ideal_nops = k8_nops;
223 #else
224 			ideal_nops = intel_nops;
225 #endif
226 		}
227 		break;
228 
229 	case X86_VENDOR_HYGON:
230 		ideal_nops = p6_nops;
231 		return;
232 
233 	case X86_VENDOR_AMD:
234 		if (boot_cpu_data.x86 > 0xf) {
235 			ideal_nops = p6_nops;
236 			return;
237 		}
238 
239 		/* fall through */
240 
241 	default:
242 #ifdef CONFIG_X86_64
243 		ideal_nops = k8_nops;
244 #else
245 		if (boot_cpu_has(X86_FEATURE_K8))
246 			ideal_nops = k8_nops;
247 		else if (boot_cpu_has(X86_FEATURE_K7))
248 			ideal_nops = k7_nops;
249 		else
250 			ideal_nops = intel_nops;
251 #endif
252 	}
253 }
254 
255 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
256 static void __init_or_module add_nops(void *insns, unsigned int len)
257 {
258 	while (len > 0) {
259 		unsigned int noplen = len;
260 		if (noplen > ASM_NOP_MAX)
261 			noplen = ASM_NOP_MAX;
262 		memcpy(insns, ideal_nops[noplen], noplen);
263 		insns += noplen;
264 		len -= noplen;
265 	}
266 }
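/*
 * Illustrative sketch of the usual add_nops() pattern (the buffer size and
 * the 5/11 byte split are made-up numbers, not taken from a real caller):
 *
 *	u8 buf[16];
 *
 *	memcpy(buf, replacement, 5);	// 5-byte replacement instruction
 *	add_nops(buf + 5, 11);		// pad with ideal nops, emitted as
 *					// at most ASM_NOP_MAX bytes each,
 *					// e.g. an 8-byte plus a 3-byte nop
 *	text_poke_early(target, buf, 16);
 */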
267 
268 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
269 extern s32 __smp_locks[], __smp_locks_end[];
270 void text_poke_early(void *addr, const void *opcode, size_t len);
271 
272 /*
273  * Are we looking at a near JMP with a 1 or 4-byte displacement?
274  */
275 static inline bool is_jmp(const u8 opcode)
276 {
277 	return opcode == 0xeb || opcode == 0xe9;
278 }
279 
280 static void __init_or_module
281 recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
282 {
283 	u8 *next_rip, *tgt_rip;
284 	s32 n_dspl, o_dspl;
285 	int repl_len;
286 
287 	if (a->replacementlen != 5)
288 		return;
289 
290 	o_dspl = *(s32 *)(insn_buff + 1);
291 
292 	/* next_rip of the replacement JMP */
293 	next_rip = repl_insn + a->replacementlen;
294 	/* target rip of the replacement JMP */
295 	tgt_rip  = next_rip + o_dspl;
296 	n_dspl = tgt_rip - orig_insn;
297 
298 	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
299 
300 	if (tgt_rip - orig_insn >= 0) {
301 		if (n_dspl - 2 <= 127)
302 			goto two_byte_jmp;
303 		else
304 			goto five_byte_jmp;
305 	/* negative offset */
306 	} else {
307 		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
308 			goto two_byte_jmp;
309 		else
310 			goto five_byte_jmp;
311 	}
312 
313 two_byte_jmp:
314 	n_dspl -= 2;
315 
316 	insn_buff[0] = 0xeb;
317 	insn_buff[1] = (s8)n_dspl;
318 	add_nops(insn_buff + 2, 3);
319 
320 	repl_len = 2;
321 	goto done;
322 
323 five_byte_jmp:
324 	n_dspl -= 5;
325 
326 	insn_buff[0] = 0xe9;
327 	*(s32 *)&insn_buff[1] = n_dspl;
328 
329 	repl_len = 5;
330 
331 done:
332 
333 	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
334 		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
335 }
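/*
 * Worked example for the fixup above (addresses are made up): a replacement
 * "jmp" at repl_insn = 0x1005 with o_dspl = 0x20 has next_rip = 0x100a and
 * tgt_rip = 0x102a.  Copied over orig_insn = 0x1000, the new displacement is
 * tgt_rip - orig_insn = 0x2a; since 0x2a - 2 fits in a signed byte, a 2-byte
 * JMP (0xeb 0x28) plus three bytes of nop padding is emitted instead of the
 * original 5-byte form.
 */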
336 
337 /*
338  * "noinline" to cause control flow change and thus invalidate I$ and
339  * cause refetch after modification.
340  */
341 static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
342 {
343 	unsigned long flags;
344 	int i;
345 
346 	for (i = 0; i < a->padlen; i++) {
347 		if (instr[i] != 0x90)
348 			return;
349 	}
350 
351 	local_irq_save(flags);
352 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
353 	local_irq_restore(flags);
354 
355 	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
356 		   instr, a->instrlen - a->padlen, a->padlen);
357 }
358 
359 /*
360  * Replace instructions with better alternatives for this CPU type. This runs
361  * before SMP is initialized to avoid SMP problems with self modifying code.
362  * This implies that asymmetric systems where APs have fewer capabilities than
363  * the boot processor are not handled. Tough. Make sure you disable such
364  * features by hand.
365  *
366  * Marked "noinline" to cause control flow change and thus insn cache
367  * to refetch changed I$ lines.
368  */
369 void __init_or_module noinline apply_alternatives(struct alt_instr *start,
370 						  struct alt_instr *end)
371 {
372 	struct alt_instr *a;
373 	u8 *instr, *replacement;
374 	u8 insn_buff[MAX_PATCH_LEN];
375 
376 	DPRINTK("alt table %px, -> %px", start, end);
377 	/*
378 	 * The scan order should be from start to end. A later scanned
379 	 * alternative code can overwrite previously scanned alternative code.
380 	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
381 	 * patch code.
382 	 *
383 	 * So be careful if you want to change the scan order to any other
384 	 * order.
385 	 */
386 	for (a = start; a < end; a++) {
387 		int insn_buff_sz = 0;
388 
389 		instr = (u8 *)&a->instr_offset + a->instr_offset;
390 		replacement = (u8 *)&a->repl_offset + a->repl_offset;
391 		BUG_ON(a->instrlen > sizeof(insn_buff));
392 		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
393 		if (!boot_cpu_has(a->cpuid)) {
394 			if (a->padlen > 1)
395 				optimize_nops(a, instr);
396 
397 			continue;
398 		}
399 
400 		DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
401 			a->cpuid >> 5,
402 			a->cpuid & 0x1f,
403 			instr, instr, a->instrlen,
404 			replacement, a->replacementlen, a->padlen);
405 
406 		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
407 		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
408 
409 		memcpy(insn_buff, replacement, a->replacementlen);
410 		insn_buff_sz = a->replacementlen;
411 
412 		/*
413 		 * 0xe8 is a relative CALL; fix the offset.
414 		 *
415 		 * Instruction length is checked before the opcode to avoid
416 		 * accessing uninitialized bytes for zero-length replacements.
417 		 */
418 		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
419 			*(s32 *)(insn_buff + 1) += replacement - instr;
420 			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
421 				*(s32 *)(insn_buff + 1),
422 				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
423 		}
424 
425 		if (a->replacementlen && is_jmp(replacement[0]))
426 			recompute_jump(a, instr, replacement, insn_buff);
427 
428 		if (a->instrlen > a->replacementlen) {
429 			add_nops(insn_buff + a->replacementlen,
430 				 a->instrlen - a->replacementlen);
431 			insn_buff_sz += a->instrlen - a->replacementlen;
432 		}
433 		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
434 
435 		text_poke_early(instr, insn_buff, insn_buff_sz);
436 	}
437 }
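/*
 * Illustrative example of what apply_alternatives() consumes (a simplified
 * sketch modeled on the 32-bit memory barrier; see <asm/alternative.h> for
 * the real macro):
 *
 *	asm volatile (ALTERNATIVE("lock; addl $0,-4(%%esp)",
 *				  "mfence",
 *				  X86_FEATURE_XMM2) ::: "memory", "cc");
 *
 * The macro emits the old instruction inline, the replacement into
 * .altinstr_replacement and a struct alt_instr entry into .altinstructions
 * recording both locations, the feature bit and the lengths.  The loop above
 * copies the replacement (padded with nops) over the original on CPUs that
 * have the feature.
 */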
438 
439 #ifdef CONFIG_SMP
440 static void alternatives_smp_lock(const s32 *start, const s32 *end,
441 				  u8 *text, u8 *text_end)
442 {
443 	const s32 *poff;
444 
445 	for (poff = start; poff < end; poff++) {
446 		u8 *ptr = (u8 *)poff + *poff;
447 
448 		if (!*poff || ptr < text || ptr >= text_end)
449 			continue;
450 		/* turn DS segment override prefix into lock prefix */
451 		if (*ptr == 0x3e)
452 			text_poke(ptr, ((unsigned char []){0xf0}), 1);
453 	}
454 }
455 
456 static void alternatives_smp_unlock(const s32 *start, const s32 *end,
457 				    u8 *text, u8 *text_end)
458 {
459 	const s32 *poff;
460 
461 	for (poff = start; poff < end; poff++) {
462 		u8 *ptr = (u8 *)poff + *poff;
463 
464 		if (!*poff || ptr < text || ptr >= text_end)
465 			continue;
466 		/* turn lock prefix into DS segment override prefix */
467 		if (*ptr == 0xf0)
468 			text_poke(ptr, ((unsigned char []){0x3E}), 1);
469 	}
470 }
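/*
 * The __smp_locks[] entries patched above are generated by LOCK_PREFIX in
 * <asm/alternative.h>, roughly (simplified sketch):
 *
 *	.pushsection .smp_locks, "a"
 *	.balign 4
 *	.long 671f - .		# self-relative offset to the prefix byte
 *	.popsection
 *	671:	lock; ...
 *
 * so each s32 is a self-relative pointer to a 0xf0 (LOCK) byte that can be
 * flipped to 0x3e (DS override) on UP and back to LOCK on SMP.
 */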
471 
472 struct smp_alt_module {
473 	/* owning module, NULL for the core kernel */
474 	struct module	*mod;
475 	char		*name;
476 
477 	/* ptrs to lock prefixes */
478 	const s32	*locks;
479 	const s32	*locks_end;
480 
481 	/* .text segment, needed to avoid patching init code ;) */
482 	u8		*text;
483 	u8		*text_end;
484 
485 	struct list_head next;
486 };
487 static LIST_HEAD(smp_alt_modules);
488 static bool uniproc_patched = false;	/* protected by text_mutex */
489 
490 void __init_or_module alternatives_smp_module_add(struct module *mod,
491 						  char *name,
492 						  void *locks, void *locks_end,
493 						  void *text,  void *text_end)
494 {
495 	struct smp_alt_module *smp;
496 
497 	mutex_lock(&text_mutex);
498 	if (!uniproc_patched)
499 		goto unlock;
500 
501 	if (num_possible_cpus() == 1)
502 		/* Don't bother remembering, we'll never have to undo it. */
503 		goto smp_unlock;
504 
505 	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
506 	if (!smp)
507 		/* we'll run the (safe but slow) SMP code then ... */
508 		goto unlock;
509 
510 	smp->mod	= mod;
511 	smp->name	= name;
512 	smp->locks	= locks;
513 	smp->locks_end	= locks_end;
514 	smp->text	= text;
515 	smp->text_end	= text_end;
516 	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
517 		smp->locks, smp->locks_end,
518 		smp->text, smp->text_end, smp->name);
519 
520 	list_add_tail(&smp->next, &smp_alt_modules);
521 smp_unlock:
522 	alternatives_smp_unlock(locks, locks_end, text, text_end);
523 unlock:
524 	mutex_unlock(&text_mutex);
525 }
526 
527 void __init_or_module alternatives_smp_module_del(struct module *mod)
528 {
529 	struct smp_alt_module *item;
530 
531 	mutex_lock(&text_mutex);
532 	list_for_each_entry(item, &smp_alt_modules, next) {
533 		if (mod != item->mod)
534 			continue;
535 		list_del(&item->next);
536 		kfree(item);
537 		break;
538 	}
539 	mutex_unlock(&text_mutex);
540 }
541 
542 void alternatives_enable_smp(void)
543 {
544 	struct smp_alt_module *mod;
545 
546 	/* Why bother if there are no other CPUs? */
547 	BUG_ON(num_possible_cpus() == 1);
548 
549 	mutex_lock(&text_mutex);
550 
551 	if (uniproc_patched) {
552 		pr_info("switching to SMP code\n");
553 		BUG_ON(num_online_cpus() != 1);
554 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
555 		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
556 		list_for_each_entry(mod, &smp_alt_modules, next)
557 			alternatives_smp_lock(mod->locks, mod->locks_end,
558 					      mod->text, mod->text_end);
559 		uniproc_patched = false;
560 	}
561 	mutex_unlock(&text_mutex);
562 }
563 
564 /*
565  * Return 1 if the address range is reserved for SMP-alternatives.
566  * Must hold text_mutex.
567  */
568 int alternatives_text_reserved(void *start, void *end)
569 {
570 	struct smp_alt_module *mod;
571 	const s32 *poff;
572 	u8 *text_start = start;
573 	u8 *text_end = end;
574 
575 	lockdep_assert_held(&text_mutex);
576 
577 	list_for_each_entry(mod, &smp_alt_modules, next) {
578 		if (mod->text > text_end || mod->text_end < text_start)
579 			continue;
580 		for (poff = mod->locks; poff < mod->locks_end; poff++) {
581 			const u8 *ptr = (const u8 *)poff + *poff;
582 
583 			if (text_start <= ptr && text_end > ptr)
584 				return 1;
585 		}
586 	}
587 
588 	return 0;
589 }
590 #endif /* CONFIG_SMP */
591 
592 #ifdef CONFIG_PARAVIRT
593 void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
594 				     struct paravirt_patch_site *end)
595 {
596 	struct paravirt_patch_site *p;
597 	char insn_buff[MAX_PATCH_LEN];
598 
599 	for (p = start; p < end; p++) {
600 		unsigned int used;
601 
602 		BUG_ON(p->len > MAX_PATCH_LEN);
603 		/* prep the buffer with the original instructions */
604 		memcpy(insn_buff, p->instr, p->len);
605 		used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
606 
607 		BUG_ON(used > p->len);
608 
609 		/* Pad the rest with nops */
610 		add_nops(insn_buff + used, p->len - used);
611 		text_poke_early(p->instr, insn_buff, p->len);
612 	}
613 }
614 extern struct paravirt_patch_site __start_parainstructions[],
615 	__stop_parainstructions[];
616 #endif	/* CONFIG_PARAVIRT */
617 
618 /*
619  * Self-test for the INT3 based CALL emulation code.
620  *
621  * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
622  * properly and that there is a stack gap between the INT3 frame and the
623  * previous context. Without this gap doing a virtual PUSH on the interrupted
624  * stack would corrupt the INT3 IRET frame.
625  *
626  * See entry_{32,64}.S for more details.
627  */
628 
629 /*
630  * We define the int3_magic() function in assembly to control the calling
631  * convention such that we can 'call' it from assembly.
632  */
633 
634 extern void int3_magic(unsigned int *ptr); /* defined in asm */
635 
636 asm (
637 "	.pushsection	.init.text, \"ax\", @progbits\n"
638 "	.type		int3_magic, @function\n"
639 "int3_magic:\n"
640 "	movl	$1, (%" _ASM_ARG1 ")\n"
641 "	ret\n"
642 "	.size		int3_magic, .-int3_magic\n"
643 "	.popsection\n"
644 );
645 
646 extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */
647 
648 static int __init
649 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
650 {
651 	struct die_args *args = data;
652 	struct pt_regs *regs = args->regs;
653 
654 	if (!regs || user_mode(regs))
655 		return NOTIFY_DONE;
656 
657 	if (val != DIE_INT3)
658 		return NOTIFY_DONE;
659 
660 	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
661 		return NOTIFY_DONE;
662 
663 	int3_emulate_call(regs, (unsigned long)&int3_magic);
664 	return NOTIFY_STOP;
665 }
666 
667 static void __init int3_selftest(void)
668 {
669 	static __initdata struct notifier_block int3_exception_nb = {
670 		.notifier_call	= int3_exception_notify,
671 		.priority	= INT_MAX-1, /* last */
672 	};
673 	unsigned int val = 0;
674 
675 	BUG_ON(register_die_notifier(&int3_exception_nb));
676 
677 	/*
678 	 * Basically: int3_magic(&val); but really complicated :-)
679 	 *
680 	 * Stick the address of the INT3 instruction into int3_selftest_ip,
681 	 * then trigger the INT3, padded with NOPs to match a CALL instruction
682 	 * length.
683 	 */
684 	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
685 		      ".pushsection .init.data,\"aw\"\n\t"
686 		      ".align " __ASM_SEL(4, 8) "\n\t"
687 		      ".type int3_selftest_ip, @object\n\t"
688 		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
689 		      "int3_selftest_ip:\n\t"
690 		      __ASM_SEL(.long, .quad) " 1b\n\t"
691 		      ".popsection\n\t"
692 		      : ASM_CALL_CONSTRAINT
693 		      : __ASM_SEL_RAW(a, D) (&val)
694 		      : "memory");
695 
696 	BUG_ON(val != 1);
697 
698 	unregister_die_notifier(&int3_exception_nb);
699 }
700 
701 void __init alternative_instructions(void)
702 {
703 	int3_selftest();
704 
705 	/*
706 	 * The patching is not fully atomic, so try to avoid local
707 	 * interrupts that might execute the code that is being patched.
708 	 * Other CPUs are not running.
709 	 */
710 	stop_nmi();
711 
712 	/*
713 	 * Don't stop machine check exceptions while patching.
714 	 * MCEs only happen when something got corrupted and in this
715 	 * case we must do something about the corruption.
716 	 * Ignoring it is worse than an unlikely patching race.
717 	 * Also machine checks tend to be broadcast and if one CPU
718 	 * goes into machine check the others follow quickly, so we don't
719 	 * expect a machine check to cause undue problems during code
720 	 * patching.
721 	 */
722 
723 	apply_alternatives(__alt_instructions, __alt_instructions_end);
724 
725 #ifdef CONFIG_SMP
726 	/* Patch to UP if other CPUs are not imminent. */
727 	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
728 		uniproc_patched = true;
729 		alternatives_smp_module_add(NULL, "core kernel",
730 					    __smp_locks, __smp_locks_end,
731 					    _text, _etext);
732 	}
733 
734 	if (!uniproc_patched || num_possible_cpus() == 1) {
735 		free_init_pages("SMP alternatives",
736 				(unsigned long)__smp_locks,
737 				(unsigned long)__smp_locks_end);
738 	}
739 #endif
740 
741 	apply_paravirt(__parainstructions, __parainstructions_end);
742 
743 	restart_nmi();
744 	alternatives_patched = 1;
745 }
746 
747 /**
748  * text_poke_early - Update instructions on a live kernel at boot time
749  * @addr: address to modify
750  * @opcode: source of the copy
751  * @len: length to copy
752  *
753  * When you use this code to patch more than one byte of an instruction
754  * you need to make sure that other CPUs cannot execute this code in parallel.
755  * Also, no thread may be preempted in the middle of these
756  * instructions. And on the local CPU you need to be protected against NMI or
757  * MCE handlers seeing an inconsistent instruction while you patch.
758  */
759 void __init_or_module text_poke_early(void *addr, const void *opcode,
760 				      size_t len)
761 {
762 	unsigned long flags;
763 
764 	if (boot_cpu_has(X86_FEATURE_NX) &&
765 	    is_module_text_address((unsigned long)addr)) {
766 		/*
767 		 * Module text is initially marked non-executable, so the
768 		 * code cannot be running and speculative code-fetches are
769 		 * prevented. Just change the code.
770 		 */
771 		memcpy(addr, opcode, len);
772 	} else {
773 		local_irq_save(flags);
774 		memcpy(addr, opcode, len);
775 		local_irq_restore(flags);
776 		sync_core();
777 
778 		/*
779 		 * Could also do a CLFLUSH here to speed up CPU recovery; but
780 		 * that causes hangs on some VIA CPUs.
781 		 */
782 	}
783 }
784 
785 typedef struct {
786 	struct mm_struct *mm;
787 } temp_mm_state_t;
788 
789 /*
790  * Using a temporary mm allows setting up temporary mappings that are not
791  * accessible by other CPUs. Such mappings are needed to perform sensitive
792  * memory writes that override the kernel memory protections (e.g., W^X),
793  * without exposing the temporary page-table mappings that are required for
794  * these write operations to other CPUs. Using a temporary mm also avoids
795  * TLB shootdowns when the mapping is torn down.
796  *
797  * Context: The temporary mm needs to be used exclusively by a single core. To
798  *          harden security, IRQs must be disabled while the temporary mm is
799  *          loaded, thereby preventing interrupt handler bugs from overriding
800  *          the kernel memory protection.
801  */
802 static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
803 {
804 	temp_mm_state_t temp_state;
805 
806 	lockdep_assert_irqs_disabled();
807 	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
808 	switch_mm_irqs_off(NULL, mm, current);
809 
810 	/*
811 	 * If breakpoints are enabled, disable them while the temporary mm is
812 	 * used. Userspace might set up watchpoints on addresses that are used
813 	 * in the temporary mm, which would lead to wrong signals being sent or
814 	 * crashes.
815 	 *
816 	 * Note that breakpoints are not disabled selectively, which also causes
817 	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
818 	 * undesirable, but still seems reasonable as the code that runs in the
819 	 * temporary mm should be short.
820 	 */
821 	if (hw_breakpoint_active())
822 		hw_breakpoint_disable();
823 
824 	return temp_state;
825 }
826 
827 static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
828 {
829 	lockdep_assert_irqs_disabled();
830 	switch_mm_irqs_off(NULL, prev_state.mm, current);
831 
832 	/*
833 	 * Restore the breakpoints if they were disabled before the temporary mm
834 	 * was loaded.
835 	 */
836 	if (hw_breakpoint_active())
837 		hw_breakpoint_restore();
838 }
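/*
 * Minimal sketch of the intended pairing (this mirrors what __text_poke()
 * below does; "ptr" and "val" are hypothetical):
 *
 *	temp_mm_state_t prev;
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	prev = use_temporary_mm(poking_mm);
 *	*ptr = val;		// write through a mapping that only
 *				// exists in poking_mm
 *	unuse_temporary_mm(prev);
 *	local_irq_restore(flags);
 */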
839 
840 __ro_after_init struct mm_struct *poking_mm;
841 __ro_after_init unsigned long poking_addr;
842 
843 static void *__text_poke(void *addr, const void *opcode, size_t len)
844 {
845 	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
846 	struct page *pages[2] = {NULL};
847 	temp_mm_state_t prev;
848 	unsigned long flags;
849 	pte_t pte, *ptep;
850 	spinlock_t *ptl;
851 	pgprot_t pgprot;
852 
853 	/*
854 	 * While the boot memory allocator is running we cannot use struct pages as
855 	 * they are not yet initialized. There is no way to recover.
856 	 */
857 	BUG_ON(!after_bootmem);
858 
859 	if (!core_kernel_text((unsigned long)addr)) {
860 		pages[0] = vmalloc_to_page(addr);
861 		if (cross_page_boundary)
862 			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
863 	} else {
864 		pages[0] = virt_to_page(addr);
865 		WARN_ON(!PageReserved(pages[0]));
866 		if (cross_page_boundary)
867 			pages[1] = virt_to_page(addr + PAGE_SIZE);
868 	}
869 	/*
870 	 * If something went wrong, crash and burn since recovery paths are not
871 	 * implemented.
872 	 */
873 	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
874 
875 	local_irq_save(flags);
876 
877 	/*
878 	 * Map the page without the global bit, as TLB flushing is done with
879 	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
880 	 */
881 	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
882 
883 	/*
884 	 * The lock is not really needed, but it allows us to avoid open-coding.
885 	 */
886 	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
887 
888 	/*
889 	 * This must not fail; preallocated in poking_init().
890 	 */
891 	VM_BUG_ON(!ptep);
892 
893 	pte = mk_pte(pages[0], pgprot);
894 	set_pte_at(poking_mm, poking_addr, ptep, pte);
895 
896 	if (cross_page_boundary) {
897 		pte = mk_pte(pages[1], pgprot);
898 		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
899 	}
900 
901 	/*
902 	 * Loading the temporary mm behaves as a compiler barrier, which
903 	 * guarantees that the PTE will be set at the time memcpy() is done.
904 	 */
905 	prev = use_temporary_mm(poking_mm);
906 
907 	kasan_disable_current();
908 	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
909 	kasan_enable_current();
910 
911 	/*
912 	 * Ensure that the PTE is only cleared after the memcpy() stores were
913 	 * issued, by using a compiler barrier.
914 	 */
915 	barrier();
916 
917 	pte_clear(poking_mm, poking_addr, ptep);
918 	if (cross_page_boundary)
919 		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
920 
921 	/*
922 	 * Loading the previous page-table hierarchy requires a serializing
923 	 * instruction that already allows the core to see the updated version.
924 	 * Xen-PV is assumed to serialize execution in a similar manner.
925 	 */
926 	unuse_temporary_mm(prev);
927 
928 	/*
929 	 * Flushing the TLB might involve IPIs, which would require enabled
930 	 * IRQs; but none are needed here, as the mm is not in use at this point.
931 	 */
932 	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
933 			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
934 			   PAGE_SHIFT, false);
935 
936 	/*
937 	 * If the text does not match what we just wrote then something is
938 	 * fundamentally screwy; there's nothing we can really do about that.
939 	 */
940 	BUG_ON(memcmp(addr, opcode, len));
941 
942 	pte_unmap_unlock(ptep, ptl);
943 	local_irq_restore(flags);
944 	return addr;
945 }
946 
947 /**
948  * text_poke - Update instructions on a live kernel
949  * @addr: address to modify
950  * @opcode: source of the copy
951  * @len: length to copy
952  *
953  * Only atomic text poke/set should be allowed when not doing early patching.
954  * It means the size must be writable atomically and the address must be aligned
955  * in a way that permits an atomic write. It also makes sure we fit on a single
956  * page.
957  *
958  * Note that the caller must ensure that if the modified code is part of a
959  * module, the module would not be removed during poking. This can be achieved
960  * by registering a module notifier, and ordering module removal and patching
961  * through a mutex.
962  */
963 void *text_poke(void *addr, const void *opcode, size_t len)
964 {
965 	lockdep_assert_held(&text_mutex);
966 
967 	return __text_poke(addr, opcode, len);
968 }
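/*
 * Typical call pattern (illustrative; it mirrors the single-byte pokes done
 * by alternatives_smp_{lock,unlock}() above, "ptr" is hypothetical):
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(ptr, ((unsigned char []){0xf0}), 1);
 *	mutex_unlock(&text_mutex);
 */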
969 
970 /**
971  * text_poke_kgdb - Update instructions on a live kernel by kgdb
972  * @addr: address to modify
973  * @opcode: source of the copy
974  * @len: length to copy
975  *
976  * Only atomic text poke/set should be allowed when not doing early patching.
977  * It means the size must be writable atomically and the address must be aligned
978  * in a way that permits an atomic write. It also makes sure we fit on a single
979  * page.
980  *
981  * Context: should only be used by kgdb, which ensures no other core is running,
982  *	    despite the fact it does not hold the text_mutex.
983  */
984 void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
985 {
986 	return __text_poke(addr, opcode, len);
987 }
988 
989 static void do_sync_core(void *info)
990 {
991 	sync_core();
992 }
993 
994 void text_poke_sync(void)
995 {
996 	on_each_cpu(do_sync_core, NULL, 1);
997 }
998 
999 struct text_poke_loc {
1000 	s32 rel_addr; /* addr := _stext + rel_addr */
1001 	s32 rel32;
1002 	u8 opcode;
1003 	const u8 text[POKE_MAX_OPCODE_SIZE];
1004 };
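/*
 * Illustrative encoding example (addresses are made up): patching in a
 * 5-byte CALL at addr targeting func is stored as rel_addr = addr - _stext,
 * opcode = CALL_INSN_OPCODE (0xe8), rel32 = func - (addr + 5), and text[]
 * holds the full new instruction so the second and third patching steps can
 * copy from it.
 */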
1005 
1006 struct bp_patching_desc {
1007 	struct text_poke_loc *vec;
1008 	int nr_entries;
1009 	atomic_t refs;
1010 };
1011 
1012 static struct bp_patching_desc *bp_desc;
1013 
1014 static __always_inline
1015 struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
1016 {
1017 	struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */
1018 
1019 	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
1020 		return NULL;
1021 
1022 	return desc;
1023 }
1024 
1025 static __always_inline void put_desc(struct bp_patching_desc *desc)
1026 {
1027 	smp_mb__before_atomic();
1028 	arch_atomic_dec(&desc->refs);
1029 }
1030 
1031 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
1032 {
1033 	return _stext + tp->rel_addr;
1034 }
1035 
1036 static __always_inline int patch_cmp(const void *key, const void *elt)
1037 {
1038 	struct text_poke_loc *tp = (struct text_poke_loc *) elt;
1039 
1040 	if (key < text_poke_addr(tp))
1041 		return -1;
1042 	if (key > text_poke_addr(tp))
1043 		return 1;
1044 	return 0;
1045 }
1046 
1047 int noinstr poke_int3_handler(struct pt_regs *regs)
1048 {
1049 	struct bp_patching_desc *desc;
1050 	struct text_poke_loc *tp;
1051 	int len, ret = 0;
1052 	void *ip;
1053 
1054 	if (user_mode(regs))
1055 		return 0;
1056 
1057 	/*
1058 	 * Having observed our INT3 instruction, we now must observe
1059 	 * bp_desc:
1060 	 *
1061 	 *	bp_desc = desc			INT3
1062 	 *	WMB				RMB
1063 	 *	write INT3			if (desc)
1064 	 */
1065 	smp_rmb();
1066 
1067 	desc = try_get_desc(&bp_desc);
1068 	if (!desc)
1069 		return 0;
1070 
1071 	/*
1072 	 * Discount the INT3. See text_poke_bp_batch().
1073 	 */
1074 	ip = (void *) regs->ip - INT3_INSN_SIZE;
1075 
1076 	/*
1077 	 * Skip the binary search if there is a single member in the vector.
1078 	 */
1079 	if (unlikely(desc->nr_entries > 1)) {
1080 		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
1081 				      sizeof(struct text_poke_loc),
1082 				      patch_cmp);
1083 		if (!tp)
1084 			goto out_put;
1085 	} else {
1086 		tp = desc->vec;
1087 		if (text_poke_addr(tp) != ip)
1088 			goto out_put;
1089 	}
1090 
1091 	len = text_opcode_size(tp->opcode);
1092 	ip += len;
1093 
1094 	switch (tp->opcode) {
1095 	case INT3_INSN_OPCODE:
1096 		/*
1097 		 * Someone poked an explicit INT3, they'll want to handle it,
1098 		 * do not consume.
1099 		 */
1100 		goto out_put;
1101 
1102 	case CALL_INSN_OPCODE:
1103 		int3_emulate_call(regs, (long)ip + tp->rel32);
1104 		break;
1105 
1106 	case JMP32_INSN_OPCODE:
1107 	case JMP8_INSN_OPCODE:
1108 		int3_emulate_jmp(regs, (long)ip + tp->rel32);
1109 		break;
1110 
1111 	default:
1112 		BUG();
1113 	}
1114 
1115 	ret = 1;
1116 
1117 out_put:
1118 	put_desc(desc);
1119 	return ret;
1120 }
1121 
1122 #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
1123 static struct text_poke_loc tp_vec[TP_VEC_MAX];
1124 static int tp_vec_nr;
1125 
1126 /**
1127  * text_poke_bp_batch() -- update instructions on live kernel on SMP
1128  * @tp:			vector of instructions to patch
1129  * @nr_entries:		number of entries in the vector
1130  *
1131  * Modify multi-byte instructions by using an int3 breakpoint on SMP.
1132  * We completely avoid stop_machine() here, and achieve the
1133  * synchronization using int3 breakpoint.
1134  *
1135  * The way it is done:
1136  *	- For each entry in the vector:
1137  *		- add an int3 trap to the address that will be patched
1138  *	- sync cores
1139  *	- For each entry in the vector:
1140  *		- update all but the first byte of the patched range
1141  *	- sync cores
1142  *	- For each entry in the vector:
1143  *		- replace the first byte (int3) by the first byte of
1144  *		  replacing opcode
1145  *	- sync cores
1146  */
1147 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
1148 {
1149 	struct bp_patching_desc desc = {
1150 		.vec = tp,
1151 		.nr_entries = nr_entries,
1152 		.refs = ATOMIC_INIT(1),
1153 	};
1154 	unsigned char int3 = INT3_INSN_OPCODE;
1155 	unsigned int i;
1156 	int do_sync;
1157 
1158 	lockdep_assert_held(&text_mutex);
1159 
1160 	smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */
1161 
1162 	/*
1163 	 * Corresponding read barrier in poke_int3_handler() for making sure the
1164 	 * nr_entries and handler are correctly ordered wrt. patching.
1165 	 */
1166 	smp_wmb();
1167 
1168 	/*
1169 	 * First step: add an int3 trap to the address that will be patched.
1170 	 */
1171 	for (i = 0; i < nr_entries; i++)
1172 		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
1173 
1174 	text_poke_sync();
1175 
1176 	/*
1177 	 * Second step: update all but the first byte of the patched range.
1178 	 */
1179 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
1180 		int len = text_opcode_size(tp[i].opcode);
1181 
1182 		if (len - INT3_INSN_SIZE > 0) {
1183 			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1184 				  (const char *)tp[i].text + INT3_INSN_SIZE,
1185 				  len - INT3_INSN_SIZE);
1186 			do_sync++;
1187 		}
1188 	}
1189 
1190 	if (do_sync) {
1191 		/*
1192 		 * According to Intel, this core syncing is very likely
1193 		 * not necessary and we'd be safe even without it. But
1194 		 * better safe than sorry (plus there's not only Intel).
1195 		 */
1196 		text_poke_sync();
1197 	}
1198 
1199 	/*
1200 	 * Third step: replace the first byte (int3) by the first byte of
1201 	 * replacing opcode.
1202 	 */
1203 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
1204 		if (tp[i].text[0] == INT3_INSN_OPCODE)
1205 			continue;
1206 
1207 		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
1208 		do_sync++;
1209 	}
1210 
1211 	if (do_sync)
1212 		text_poke_sync();
1213 
1214 	/*
1215 	 * Remove and wait, like synchronize_rcu(), except we have a very
1216 	 * primitive refcount based completion.
1217 	 */
1218 	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
1219 	if (!atomic_dec_and_test(&desc.refs))
1220 		atomic_cond_read_acquire(&desc.refs, !VAL);
1221 }
1222 
1223 static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
1224 			       const void *opcode, size_t len, const void *emulate)
1225 {
1226 	struct insn insn;
1227 
1228 	memcpy((void *)tp->text, opcode, len);
1229 	if (!emulate)
1230 		emulate = opcode;
1231 
1232 	kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
1233 	insn_get_length(&insn);
1234 
1235 	BUG_ON(!insn_complete(&insn));
1236 	BUG_ON(len != insn.length);
1237 
1238 	tp->rel_addr = addr - (void *)_stext;
1239 	tp->opcode = insn.opcode.bytes[0];
1240 
1241 	switch (tp->opcode) {
1242 	case INT3_INSN_OPCODE:
1243 		break;
1244 
1245 	case CALL_INSN_OPCODE:
1246 	case JMP32_INSN_OPCODE:
1247 	case JMP8_INSN_OPCODE:
1248 		tp->rel32 = insn.immediate.value;
1249 		break;
1250 
1251 	default: /* assume NOP */
1252 		switch (len) {
1253 		case 2: /* NOP2 -- emulate as JMP8+0 */
1254 			BUG_ON(memcmp(emulate, ideal_nops[len], len));
1255 			tp->opcode = JMP8_INSN_OPCODE;
1256 			tp->rel32 = 0;
1257 			break;
1258 
1259 		case 5: /* NOP5 -- emulate as JMP32+0 */
1260 			BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
1261 			tp->opcode = JMP32_INSN_OPCODE;
1262 			tp->rel32 = 0;
1263 			break;
1264 
1265 		default: /* unknown instruction */
1266 			BUG();
1267 		}
1268 		break;
1269 	}
1270 }
1271 
1272 /*
1273  * We rely hard on tp_vec being ordered; ensure this is so by flushing
1274  * early if needed.
1275  */
1276 static bool tp_order_fail(void *addr)
1277 {
1278 	struct text_poke_loc *tp;
1279 
1280 	if (!tp_vec_nr)
1281 		return false;
1282 
1283 	if (!addr) /* force */
1284 		return true;
1285 
1286 	tp = &tp_vec[tp_vec_nr - 1];
1287 	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
1288 		return true;
1289 
1290 	return false;
1291 }
1292 
1293 static void text_poke_flush(void *addr)
1294 {
1295 	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
1296 		text_poke_bp_batch(tp_vec, tp_vec_nr);
1297 		tp_vec_nr = 0;
1298 	}
1299 }
1300 
1301 void text_poke_finish(void)
1302 {
1303 	text_poke_flush(NULL);
1304 }
1305 
1306 void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
1307 {
1308 	struct text_poke_loc *tp;
1309 
1310 	if (unlikely(system_state == SYSTEM_BOOTING)) {
1311 		text_poke_early(addr, opcode, len);
1312 		return;
1313 	}
1314 
1315 	text_poke_flush(addr);
1316 
1317 	tp = &tp_vec[tp_vec_nr++];
1318 	text_poke_loc_init(tp, addr, opcode, len, emulate);
1319 }
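/*
 * Minimal sketch of the batching API ("sites[]", "new_insn[]", "len" and
 * "nr" are hypothetical; jump label batching is expected to drive it this
 * way):
 *
 *	mutex_lock(&text_mutex);
 *	for (i = 0; i < nr; i++)
 *		text_poke_queue(sites[i], new_insn[i], len, NULL);
 *	text_poke_finish();
 *	mutex_unlock(&text_mutex);
 *
 * Sites must be queued in ascending address order, otherwise tp_order_fail()
 * forces an early flush of the vector.
 */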
1320 
1321 /**
1322  * text_poke_bp() -- update instructions on live kernel on SMP
1323  * @addr:	address to patch
1324  * @opcode:	opcode of new instruction
1325  * @len:	length to copy
1326  * @emulate:	instruction to emulate when a CPU hits the temporary INT3; NULL means use @opcode
1327  *
1328  * Update a single instruction using a one-entry vector on the stack,
1329  * avoiding dynamically allocated memory. This function should be used when
1330  * it is not possible to allocate memory.
1331  */
1332 void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
1333 {
1334 	struct text_poke_loc tp;
1335 
1336 	if (unlikely(system_state == SYSTEM_BOOTING)) {
1337 		text_poke_early(addr, opcode, len);
1338 		return;
1339 	}
1340 
1341 	text_poke_loc_init(&tp, addr, opcode, len, emulate);
1342 	text_poke_bp_batch(&tp, 1);
1343 }
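/*
 * Illustrative use (a sketch, assuming text_gen_insn() from
 * <asm/text-patching.h> is available; "addr" and "func" are hypothetical):
 *
 *	mutex_lock(&text_mutex);
 *	text_poke_bp(addr, text_gen_insn(CALL_INSN_OPCODE, addr, func),
 *		     CALL_INSN_SIZE, NULL);
 *	mutex_unlock(&text_mutex);
 *
 * Passing NULL for @emulate means the new CALL itself is what
 * poke_int3_handler() emulates for any CPU that hits the temporary INT3.
 */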
1344