xref: /openbmc/linux/arch/x86/kernel/alternative.c (revision 1644d755d0b06d0f1ee93e1c44336c29386546b4)
1  // SPDX-License-Identifier: GPL-2.0-only
2  #define pr_fmt(fmt) "SMP alternatives: " fmt
3  
4  #include <linux/module.h>
5  #include <linux/sched.h>
6  #include <linux/perf_event.h>
7  #include <linux/mutex.h>
8  #include <linux/list.h>
9  #include <linux/stringify.h>
10  #include <linux/highmem.h>
11  #include <linux/mm.h>
12  #include <linux/vmalloc.h>
13  #include <linux/memory.h>
14  #include <linux/stop_machine.h>
15  #include <linux/slab.h>
16  #include <linux/kdebug.h>
17  #include <linux/kprobes.h>
18  #include <linux/mmu_context.h>
19  #include <linux/bsearch.h>
20  #include <linux/sync_core.h>
21  #include <asm/text-patching.h>
22  #include <asm/alternative.h>
23  #include <asm/sections.h>
24  #include <asm/mce.h>
25  #include <asm/nmi.h>
26  #include <asm/cacheflush.h>
27  #include <asm/tlbflush.h>
28  #include <asm/insn.h>
29  #include <asm/io.h>
30  #include <asm/fixmap.h>
31  #include <asm/paravirt.h>
32  #include <asm/asm-prototypes.h>
33  
34  int __read_mostly alternatives_patched;
35  
36  EXPORT_SYMBOL_GPL(alternatives_patched);
37  
38  #define MAX_PATCH_LEN (255-1)
39  
40  #define DA_ALL		(~0)
41  #define DA_ALT		0x01
42  #define DA_RET		0x02
43  #define DA_RETPOLINE	0x04
44  #define DA_ENDBR	0x08
45  #define DA_SMP		0x10
46  
47  static unsigned int __initdata_or_module debug_alternative;
48  
49  static int __init debug_alt(char *str)
50  {
51  	if (str && *str == '=')
52  		str++;
53  
54  	if (!str || kstrtouint(str, 0, &debug_alternative))
55  		debug_alternative = DA_ALL;
56  
57  	return 1;
58  }
59  __setup("debug-alternative", debug_alt);
60  
61  static int noreplace_smp;
62  
63  static int __init setup_noreplace_smp(char *str)
64  {
65  	noreplace_smp = 1;
66  	return 1;
67  }
68  __setup("noreplace-smp", setup_noreplace_smp);
69  
70  #define DPRINTK(type, fmt, args...)					\
71  do {									\
72  	if (debug_alternative & DA_##type)				\
73  		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
74  } while (0)
75  
76  #define DUMP_BYTES(type, buf, len, fmt, args...)			\
77  do {									\
78  	if (unlikely(debug_alternative & DA_##type)) {			\
79  		int j;							\
80  									\
81  		if (!(len))						\
82  			break;						\
83  									\
84  		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
85  		for (j = 0; j < (len) - 1; j++)				\
86  			printk(KERN_CONT "%02hhx ", buf[j]);		\
87  		printk(KERN_CONT "%02hhx\n", buf[j]);			\
88  	}								\
89  } while (0)
90  
91  static const unsigned char x86nops[] =
92  {
93  	BYTES_NOP1,
94  	BYTES_NOP2,
95  	BYTES_NOP3,
96  	BYTES_NOP4,
97  	BYTES_NOP5,
98  	BYTES_NOP6,
99  	BYTES_NOP7,
100  	BYTES_NOP8,
101  #ifdef CONFIG_64BIT
102  	BYTES_NOP9,
103  	BYTES_NOP10,
104  	BYTES_NOP11,
105  #endif
106  };
107  
108  const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
109  {
110  	NULL,
111  	x86nops,
112  	x86nops + 1,
113  	x86nops + 1 + 2,
114  	x86nops + 1 + 2 + 3,
115  	x86nops + 1 + 2 + 3 + 4,
116  	x86nops + 1 + 2 + 3 + 4 + 5,
117  	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
118  	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
119  #ifdef CONFIG_64BIT
120  	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
121  	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
122  	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10,
123  #endif
124  };
125  
126  /*
127   * Fill the buffer with a single effective instruction of size @len.
128   *
129   * In order not to issue an ORC stack depth tracking CFI entry (Call Frame Info)
130   * for every single-byte NOP, try to generate the largest available NOP of
131   * size <= ASM_NOP_MAX so that only a single CFI entry is generated (vs. one
132   * for each single-byte NOP). If the @len to fill is > ASM_NOP_MAX, pad with INT3 and
133   * *jump* over instead of executing long and daft NOPs.
134   */
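/*
 * For example (illustrative, assuming ASM_NOP_MAX == 11 as on 64-bit): a
 * request to fill 100 bytes cannot be covered by a single NOP, so add_nop()
 * emits a two-byte short jump over the hole ("eb 62", i.e. JMP8 to .+100)
 * and pads the remaining 98 bytes with INT3 (0xcc), which are never executed.
 */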
135  static void __init_or_module add_nop(u8 *instr, unsigned int len)
136  {
137  	u8 *target = instr + len;
138  
139  	if (!len)
140  		return;
141  
142  	if (len <= ASM_NOP_MAX) {
143  		memcpy(instr, x86_nops[len], len);
144  		return;
145  	}
146  
147  	if (len < 128) {
148  		__text_gen_insn(instr, JMP8_INSN_OPCODE, instr, target, JMP8_INSN_SIZE);
149  		instr += JMP8_INSN_SIZE;
150  	} else {
151  		__text_gen_insn(instr, JMP32_INSN_OPCODE, instr, target, JMP32_INSN_SIZE);
152  		instr += JMP32_INSN_SIZE;
153  	}
154  
155  	for (;instr < target; instr++)
156  		*instr = INT3_INSN_OPCODE;
157  }
158  
159  extern s32 __retpoline_sites[], __retpoline_sites_end[];
160  extern s32 __return_sites[], __return_sites_end[];
161  extern s32 __cfi_sites[], __cfi_sites_end[];
162  extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
163  extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
164  extern s32 __smp_locks[], __smp_locks_end[];
165  void text_poke_early(void *addr, const void *opcode, size_t len);
166  
167  /*
168   * Matches NOP and NOPL, not any of the other possible NOPs.
169   */
170  static bool insn_is_nop(struct insn *insn)
171  {
172  	/* Anything NOP, but no REP NOP */
173  	if (insn->opcode.bytes[0] == 0x90 &&
174  	    (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3))
175  		return true;
176  
177  	/* NOPL */
178  	if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F)
179  		return true;
180  
181  	/* TODO: more nops */
182  
183  	return false;
184  }
185  
186  /*
187   * Find the offset of the first non-NOP instruction starting at @offset
188   * but no further than @len.
189   */
190  static int skip_nops(u8 *instr, int offset, int len)
191  {
192  	struct insn insn;
193  
194  	for (; offset < len; offset += insn.length) {
195  		if (insn_decode_kernel(&insn, &instr[offset]))
196  			break;
197  
198  		if (!insn_is_nop(&insn))
199  			break;
200  	}
201  
202  	return offset;
203  }
204  
205  /*
206   * Optimize a sequence of NOPs, possibly preceded by an unconditional jump
207   * to the end of the NOP sequence into a single NOP.
208   * to the end of the NOP sequence, into a single NOP.
209  static bool __init_or_module
210  __optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev, int *target)
211  {
212  	int i = *next - insn->length;
213  
214  	switch (insn->opcode.bytes[0]) {
215  	case JMP8_INSN_OPCODE:
216  	case JMP32_INSN_OPCODE:
217  		*prev = i;
218  		*target = *next + insn->immediate.value;
219  		return false;
220  	}
221  
222  	if (insn_is_nop(insn)) {
223  		int nop = i;
224  
225  		*next = skip_nops(instr, *next, len);
226  		if (*target && *next == *target)
227  			nop = *prev;
228  
229  		add_nop(instr + nop, *next - nop);
230  		DUMP_BYTES(ALT, instr, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
231  		return true;
232  	}
233  
234  	*target = 0;
235  	return false;
236  }
237  
238  /*
239   * "noinline" to cause control flow change and thus invalidate I$ and
240   * cause refetch after modification.
241   */
242  static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
243  {
244  	int prev, target = 0;
245  
246  	for (int next, i = 0; i < len; i = next) {
247  		struct insn insn;
248  
249  		if (insn_decode_kernel(&insn, &instr[i]))
250  			return;
251  
252  		next = i + insn.length;
253  
254  		__optimize_nops(instr, len, &insn, &next, &prev, &target);
255  	}
256  }
257  
258  static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
259  {
260  	unsigned long flags;
261  
262  	local_irq_save(flags);
263  	optimize_nops(instr, len);
264  	sync_core();
265  	local_irq_restore(flags);
266  }
267  
268  /*
269   * In this context, "source" is where the instructions are placed in the
270   * section .altinstr_replacement, for example during kernel build by the
271   * toolchain.
272   * "Destination" is where the instructions are being patched in by this
273   * machinery.
274   *
275   * The source offset is:
276   *
277   *   src_imm = target - src_next_ip                  (1)
278   *
279   * and the target offset is:
280   *
281   *   dst_imm = target - dst_next_ip                  (2)
282   *
283   * so rework (1) as an expression for target like:
284   *
285   *   target = src_imm + src_next_ip                  (1a)
286   *
287   * and substitute in (2) to get:
288   *
289   *   dst_imm = (src_imm + src_next_ip) - dst_next_ip (3)
290   *
291   * Now, since the instruction stream is 'identical' at src and dst (it
292   * is being copied after all) it can be stated that:
293   *
294   *   src_next_ip = src + ip_offset
295   *   dst_next_ip = dst + ip_offset                   (4)
296   *
297   * Substitute (4) in (3) and observe ip_offset being cancelled out to
298   * obtain:
299   *
300   *   dst_imm = src_imm + (src + ip_offset) - (dst + ip_offset)
301   *           = src_imm + src - dst + ip_offset - ip_offset
302   *           = src_imm + src - dst                   (5)
303   *
304   * IOW, only the relative displacement of the code block matters.
305   */
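/*
 * A small worked instance of (5), purely illustrative: if a CALL in the
 * replacement at src == 0x1000 carries src_imm == 0x200, then copying it to
 * dst == 0x3000 needs dst_imm = 0x200 + 0x1000 - 0x3000 = -0x1e00 for the
 * copy to keep targeting the same absolute address.
 */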
306  
307  #define apply_reloc_n(n_, p_, d_)				\
308  	do {							\
309  		s32 v = *(s##n_ *)(p_);				\
310  		v += (d_);					\
311  		BUG_ON((v >> 31) != (v >> (n_-1)));		\
312  		*(s##n_ *)(p_) = (s##n_)v;			\
313  	} while (0)
314  
315  
316  static __always_inline
317  void apply_reloc(int n, void *ptr, uintptr_t diff)
318  {
319  	switch (n) {
320  	case 1: apply_reloc_n(8, ptr, diff); break;
321  	case 2: apply_reloc_n(16, ptr, diff); break;
322  	case 4: apply_reloc_n(32, ptr, diff); break;
323  	default: BUG();
324  	}
325  }
326  
327  static __always_inline
328  bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
329  {
330  	u8 *target = src + offset;
331  	/*
332  	 * If the target is inside the patched block, it's relative to the
333  	 * block itself and does not need relocation.
334  	 */
335  	return (target < src || target > src + src_len);
336  }
337  
338  static void __init_or_module noinline
339  apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
340  {
341  	int prev, target = 0;
342  
343  	for (int next, i = 0; i < len; i = next) {
344  		struct insn insn;
345  
346  		if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
347  			return;
348  
349  		next = i + insn.length;
350  
351  		if (__optimize_nops(buf, len, &insn, &next, &prev, &target))
352  			continue;
353  
354  		switch (insn.opcode.bytes[0]) {
355  		case 0x0f:
356  			if (insn.opcode.bytes[1] < 0x80 ||
357  			    insn.opcode.bytes[1] > 0x8f)
358  				break;
359  
360  			fallthrough;	/* Jcc.d32 */
361  		case 0x70 ... 0x7f:	/* Jcc.d8 */
362  		case JMP8_INSN_OPCODE:
363  		case JMP32_INSN_OPCODE:
364  		case CALL_INSN_OPCODE:
365  			if (need_reloc(next + insn.immediate.value, src, src_len)) {
366  				apply_reloc(insn.immediate.nbytes,
367  					    buf + i + insn_offset_immediate(&insn),
368  					    src - dest);
369  			}
370  
371  			/*
372  			 * Where possible, convert JMP.d32 into JMP.d8.
373  			 */
374  			if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
375  				s32 imm = insn.immediate.value;
376  				imm += src - dest;
377  				imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
378  				if ((imm >> 31) == (imm >> 7)) {
379  					buf[i+0] = JMP8_INSN_OPCODE;
380  					buf[i+1] = (s8)imm;
381  
382  					memset(&buf[i+2], INT3_INSN_OPCODE, insn.length - 2);
383  				}
384  			}
385  			break;
386  		}
387  
388  		if (insn_rip_relative(&insn)) {
389  			if (need_reloc(next + insn.displacement.value, src, src_len)) {
390  				apply_reloc(insn.displacement.nbytes,
391  					    buf + i + insn_offset_displacement(&insn),
392  					    src - dest);
393  			}
394  		}
395  	}
396  }
397  
398  /*
399   * Replace instructions with better alternatives for this CPU type. This runs
400   * before SMP is initialized to avoid SMP problems with self modifying code.
401   * This implies that asymmetric systems where APs have fewer capabilities than
402   * the boot processor are not handled. Tough. Make sure you disable such
403   * features by hand.
404   *
405   * Marked "noinline" to cause control flow change and thus insn cache
406   * to refetch changed I$ lines.
407   */
408  void __init_or_module noinline apply_alternatives(struct alt_instr *start,
409  						  struct alt_instr *end)
410  {
411  	struct alt_instr *a;
412  	u8 *instr, *replacement;
413  	u8 insn_buff[MAX_PATCH_LEN];
414  
415  	DPRINTK(ALT, "alt table %px, -> %px", start, end);
416  
417  	/*
418  	 * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
419  	 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
420  	 * During the process, KASAN becomes confused seeing partial LA57
421  	 * conversion and triggers a false-positive out-of-bound report.
422  	 *
423  	 * Disable KASAN until the patching is complete.
424  	 */
425  	kasan_disable_current();
426  
427  	/*
428  	 * The scan order should be from start to end. A later scanned
429  	 * alternative code can overwrite previously scanned alternative code.
430  	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
431  	 * patch code.
432  	 *
433  	 * So be careful if you want to change the scan order to any other
434  	 * order.
435  	 */
436  	for (a = start; a < end; a++) {
437  		int insn_buff_sz = 0;
438  
439  		instr = (u8 *)&a->instr_offset + a->instr_offset;
440  		replacement = (u8 *)&a->repl_offset + a->repl_offset;
441  		BUG_ON(a->instrlen > sizeof(insn_buff));
442  		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
443  
444  		/*
445  		 * Patch if either:
446  		 * - feature is present
447  		 * - feature is not present but ALT_FLAG_NOT is set, meaning:
448  		 *   patch if the feature is *NOT* present.
449  		 */
450  		if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
451  			optimize_nops_inplace(instr, a->instrlen);
452  			continue;
453  		}
454  
455  		DPRINTK(ALT, "feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
456  			(a->flags & ALT_FLAG_NOT) ? "!" : "",
457  			a->cpuid >> 5,
458  			a->cpuid & 0x1f,
459  			instr, instr, a->instrlen,
460  			replacement, a->replacementlen);
461  
462  		memcpy(insn_buff, replacement, a->replacementlen);
463  		insn_buff_sz = a->replacementlen;
464  
465  		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
466  			insn_buff[insn_buff_sz] = 0x90;
467  
468  		apply_relocation(insn_buff, a->instrlen, instr, replacement, a->replacementlen);
469  
470  		DUMP_BYTES(ALT, instr, a->instrlen, "%px:   old_insn: ", instr);
471  		DUMP_BYTES(ALT, replacement, a->replacementlen, "%px:   rpl_insn: ", replacement);
472  		DUMP_BYTES(ALT, insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
473  
474  		text_poke_early(instr, insn_buff, insn_buff_sz);
475  	}
476  
477  	kasan_enable_current();
478  }
479  
480  static inline bool is_jcc32(struct insn *insn)
481  {
482  	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
483  	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
484  }
485  
486  #if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)
487  
488  /*
489   * CALL/JMP *%\reg
490   */
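/*
 * For example (illustrative): emit_indirect(CALL_INSN_OPCODE, 11, bytes)
 * below produces "41 ff d3", i.e. a REX.B prefix, opcode 0xff and ModRM
 * 0xd3 (mod=3, reg=2 for CALL, rm=3 for %r11), which is "call *%r11".
 */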
491  static int emit_indirect(int op, int reg, u8 *bytes)
492  {
493  	int i = 0;
494  	u8 modrm;
495  
496  	switch (op) {
497  	case CALL_INSN_OPCODE:
498  		modrm = 0x10; /* Reg = 2; CALL r/m */
499  		break;
500  
501  	case JMP32_INSN_OPCODE:
502  		modrm = 0x20; /* Reg = 4; JMP r/m */
503  		break;
504  
505  	default:
506  		WARN_ON_ONCE(1);
507  		return -1;
508  	}
509  
510  	if (reg >= 8) {
511  		bytes[i++] = 0x41; /* REX.B prefix */
512  		reg -= 8;
513  	}
514  
515  	modrm |= 0xc0; /* Mod = 3 */
516  	modrm += reg;
517  
518  	bytes[i++] = 0xff; /* opcode */
519  	bytes[i++] = modrm;
520  
521  	return i;
522  }
523  
524  static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
525  {
526  	u8 op = insn->opcode.bytes[0];
527  	int i = 0;
528  
529  	/*
530  	 * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional
531  	 * tail-calls. Deal with them.
532  	 */
533  	if (is_jcc32(insn)) {
534  		bytes[i++] = op;
535  		op = insn->opcode.bytes[1];
536  		goto clang_jcc;
537  	}
538  
539  	if (insn->length == 6)
540  		bytes[i++] = 0x2e; /* CS-prefix */
541  
542  	switch (op) {
543  	case CALL_INSN_OPCODE:
544  		__text_gen_insn(bytes+i, op, addr+i,
545  				__x86_indirect_call_thunk_array[reg],
546  				CALL_INSN_SIZE);
547  		i += CALL_INSN_SIZE;
548  		break;
549  
550  	case JMP32_INSN_OPCODE:
551  clang_jcc:
552  		__text_gen_insn(bytes+i, op, addr+i,
553  				__x86_indirect_jump_thunk_array[reg],
554  				JMP32_INSN_SIZE);
555  		i += JMP32_INSN_SIZE;
556  		break;
557  
558  	default:
559  		WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr);
560  		return -1;
561  	}
562  
563  	WARN_ON_ONCE(i != insn->length);
564  
565  	return i;
566  }
567  
568  /*
569   * Rewrite the compiler generated retpoline thunk calls.
570   *
571   * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
572   * indirect instructions, avoiding the extra indirection.
573   *
574   * For example, convert:
575   *
576   *   CALL __x86_indirect_thunk_\reg
577   *
578   * into:
579   *
580   *   CALL *%\reg
581   *
582   * It also tries to inline spectre_v2=retpoline,lfence when size permits.
583   */
584  static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
585  {
586  	retpoline_thunk_t *target;
587  	int reg, ret, i = 0;
588  	u8 op, cc;
589  
590  	target = addr + insn->length + insn->immediate.value;
591  	reg = target - __x86_indirect_thunk_array;
592  
593  	if (WARN_ON_ONCE(reg & ~0xf))
594  		return -1;
595  
596  	/* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
597  	BUG_ON(reg == 4);
598  
599  	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
600  	    !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
601  		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
602  			return emit_call_track_retpoline(addr, insn, reg, bytes);
603  
604  		return -1;
605  	}
606  
607  	op = insn->opcode.bytes[0];
608  
609  	/*
610  	 * Convert:
611  	 *
612  	 *   Jcc.d32 __x86_indirect_thunk_\reg
613  	 *
614  	 * into:
615  	 *
616  	 *   Jncc.d8 1f
617  	 *   [ LFENCE ]
618  	 *   JMP *%\reg
619  	 *   [ NOP ]
620  	 * 1:
621  	 */
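	/*
	 * Concretely (illustrative, the spectre_v2=off case where no LFENCE
	 * is emitted): a 6-byte "je __x86_indirect_thunk_r11" (0f 84 <rel32>)
	 * becomes "75 04 41 ff e3 cc", i.e. "jne 1f; jmp *%r11; int3; 1:".
	 */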
622  	if (is_jcc32(insn)) {
623  		cc = insn->opcode.bytes[1] & 0xf;
624  		cc ^= 1; /* invert condition */
625  
626  		bytes[i++] = 0x70 + cc;        /* Jcc.d8 */
627  		bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */
628  
629  		/* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
630  		op = JMP32_INSN_OPCODE;
631  	}
632  
633  	/*
634  	 * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
635  	 */
636  	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
637  		bytes[i++] = 0x0f;
638  		bytes[i++] = 0xae;
639  		bytes[i++] = 0xe8; /* LFENCE */
640  	}
641  
642  	ret = emit_indirect(op, reg, bytes + i);
643  	if (ret < 0)
644  		return ret;
645  	i += ret;
646  
647  	/*
648  	 * The compiler is supposed to EMIT an INT3 after every unconditional
649  	 * JMP instruction due to AMD BTC. However, if the compiler is too old
650  	 * or SLS isn't enabled, we still need an INT3 after indirect JMPs
651  	 * even on Intel.
652  	 */
653  	if (op == JMP32_INSN_OPCODE && i < insn->length)
654  		bytes[i++] = INT3_INSN_OPCODE;
655  
656  	for (; i < insn->length;)
657  		bytes[i++] = BYTES_NOP1;
658  
659  	return i;
660  }
661  
662  /*
663   * Generated by 'objtool --retpoline'.
664   */
665  void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
666  {
667  	s32 *s;
668  
669  	for (s = start; s < end; s++) {
670  		void *addr = (void *)s + *s;
671  		struct insn insn;
672  		int len, ret;
673  		u8 bytes[16];
674  		u8 op1, op2;
675  
676  		ret = insn_decode_kernel(&insn, addr);
677  		if (WARN_ON_ONCE(ret < 0))
678  			continue;
679  
680  		op1 = insn.opcode.bytes[0];
681  		op2 = insn.opcode.bytes[1];
682  
683  		switch (op1) {
684  		case CALL_INSN_OPCODE:
685  		case JMP32_INSN_OPCODE:
686  			break;
687  
688  		case 0x0f: /* escape */
689  			if (op2 >= 0x80 && op2 <= 0x8f)
690  				break;
691  			fallthrough;
692  		default:
693  			WARN_ON_ONCE(1);
694  			continue;
695  		}
696  
697  		DPRINTK(RETPOLINE, "retpoline at: %pS (%px) len: %d to: %pS",
698  			addr, addr, insn.length,
699  			addr + insn.length + insn.immediate.value);
700  
701  		len = patch_retpoline(addr, &insn, bytes);
702  		if (len == insn.length) {
703  			optimize_nops(bytes, len);
704  			DUMP_BYTES(RETPOLINE, ((u8*)addr),  len, "%px: orig: ", addr);
705  			DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
706  			text_poke_early(addr, bytes, len);
707  		}
708  	}
709  }
710  
711  #ifdef CONFIG_RETHUNK
712  
713  /*
714   * Rewrite the compiler generated return thunk tail-calls.
715   *
716   * For example, convert:
717   *
718   *   JMP __x86_return_thunk
719   *
720   * into:
721   *
722   *   RET
723   */
724  static int patch_return(void *addr, struct insn *insn, u8 *bytes)
725  {
726  	int i = 0;
727  
728  	/* Patch the custom return thunks... */
729  	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
730  		i = JMP32_INSN_SIZE;
731  		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
732  	} else {
733  		/* ... or patch them out if not needed. */
734  		bytes[i++] = RET_INSN_OPCODE;
735  	}
736  
737  	for (; i < insn->length;)
738  		bytes[i++] = INT3_INSN_OPCODE;
739  	return i;
740  }
741  
742  void __init_or_module noinline apply_returns(s32 *start, s32 *end)
743  {
744  	s32 *s;
745  
746  	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
747  		static_call_force_reinit();
748  
749  	for (s = start; s < end; s++) {
750  		void *dest = NULL, *addr = (void *)s + *s;
751  		struct insn insn;
752  		int len, ret;
753  		u8 bytes[16];
754  		u8 op;
755  
756  		ret = insn_decode_kernel(&insn, addr);
757  		if (WARN_ON_ONCE(ret < 0))
758  			continue;
759  
760  		op = insn.opcode.bytes[0];
761  		if (op == JMP32_INSN_OPCODE)
762  			dest = addr + insn.length + insn.immediate.value;
763  
764  		if (__static_call_fixup(addr, op, dest) ||
765  		    WARN_ONCE(dest != &__x86_return_thunk,
766  			      "missing return thunk: %pS-%pS: %*ph",
767  			      addr, dest, 5, addr))
768  			continue;
769  
770  		DPRINTK(RET, "return thunk at: %pS (%px) len: %d to: %pS",
771  			addr, addr, insn.length,
772  			addr + insn.length + insn.immediate.value);
773  
774  		len = patch_return(addr, &insn, bytes);
775  		if (len == insn.length) {
776  			DUMP_BYTES(RET, ((u8*)addr),  len, "%px: orig: ", addr);
777  			DUMP_BYTES(RET, ((u8*)bytes), len, "%px: repl: ", addr);
778  			text_poke_early(addr, bytes, len);
779  		}
780  	}
781  }
782  #else
783  void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
784  #endif /* CONFIG_RETHUNK */
785  
786  #else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */
787  
788  void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
789  void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
790  
791  #endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */
792  
793  #ifdef CONFIG_X86_KERNEL_IBT
794  
795  static void poison_cfi(void *addr);
796  
797  static void __init_or_module poison_endbr(void *addr, bool warn)
798  {
799  	u32 endbr, poison = gen_endbr_poison();
800  
801  	if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
802  		return;
803  
804  	if (!is_endbr(endbr)) {
805  		WARN_ON_ONCE(warn);
806  		return;
807  	}
808  
809  	DPRINTK(ENDBR, "ENDBR at: %pS (%px)", addr, addr);
810  
811  	/*
812  	 * When we have IBT, the lack of ENDBR will trigger #CP
813  	 */
814  	DUMP_BYTES(ENDBR, ((u8*)addr), 4, "%px: orig: ", addr);
815  	DUMP_BYTES(ENDBR, ((u8*)&poison), 4, "%px: repl: ", addr);
816  	text_poke_early(addr, &poison, 4);
817  }
818  
819  /*
820   * Generated by: objtool --ibt
821   *
822   * Seal the functions for indirect calls by clobbering the ENDBR instructions
823   * and the kCFI hash value.
824   */
825  void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
826  {
827  	s32 *s;
828  
829  	for (s = start; s < end; s++) {
830  		void *addr = (void *)s + *s;
831  
832  		poison_endbr(addr, true);
833  		if (IS_ENABLED(CONFIG_FINEIBT))
834  			poison_cfi(addr - 16);
835  	}
836  }
837  
838  #else
839  
840  void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }
841  
842  #endif /* CONFIG_X86_KERNEL_IBT */
843  
844  #ifdef CONFIG_FINEIBT
845  
846  enum cfi_mode {
847  	CFI_DEFAULT,
848  	CFI_OFF,
849  	CFI_KCFI,
850  	CFI_FINEIBT,
851  };
852  
853  static enum cfi_mode cfi_mode __ro_after_init = CFI_DEFAULT;
854  static bool cfi_rand __ro_after_init = true;
855  static u32  cfi_seed __ro_after_init;
856  
857  /*
858   * Re-hash the CFI hash with a boot-time seed while making sure the result is
859   * not a valid ENDBR instruction.
860   */
861  static u32 cfi_rehash(u32 hash)
862  {
863  	hash ^= cfi_seed;
864  	while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
865  		bool lsb = hash & 1;
866  		hash >>= 1;
867  		if (lsb)
868  			hash ^= 0x80200003;
869  	}
870  	return hash;
871  }
872  
873  static __init int cfi_parse_cmdline(char *str)
874  {
875  	if (!str)
876  		return -EINVAL;
877  
878  	while (str) {
879  		char *next = strchr(str, ',');
880  		if (next) {
881  			*next = 0;
882  			next++;
883  		}
884  
885  		if (!strcmp(str, "auto")) {
886  			cfi_mode = CFI_DEFAULT;
887  		} else if (!strcmp(str, "off")) {
888  			cfi_mode = CFI_OFF;
889  			cfi_rand = false;
890  		} else if (!strcmp(str, "kcfi")) {
891  			cfi_mode = CFI_KCFI;
892  		} else if (!strcmp(str, "fineibt")) {
893  			cfi_mode = CFI_FINEIBT;
894  		} else if (!strcmp(str, "norand")) {
895  			cfi_rand = false;
896  		} else {
897  			pr_err("Ignoring unknown cfi option (%s).", str);
898  		}
899  
900  		str = next;
901  	}
902  
903  	return 0;
904  }
905  early_param("cfi", cfi_parse_cmdline);
906  
907  /*
908   * kCFI						FineIBT
909   *
910   * __cfi_\func:					__cfi_\func:
911   *	movl   $0x12345678,%eax		// 5	     endbr64			// 4
912   *	nop					     subl   $0x12345678,%r10d   // 7
913   *	nop					     jz     1f			// 2
914   *	nop					     ud2			// 2
915   *	nop					1:   nop			// 1
916   *	nop
917   *	nop
918   *	nop
919   *	nop
920   *	nop
921   *	nop
922   *	nop
923   *
924   *
925   * caller:					caller:
926   *	movl	$(-0x12345678),%r10d	 // 6	     movl   $0x12345678,%r10d	// 6
927   *	addl	$-15(%r11),%r10d	 // 4	     sub    $16,%r11		// 4
928   *	je	1f			 // 2	     nop4			// 4
929   *	ud2				 // 2
930   * 1:	call	__x86_indirect_thunk_r11 // 5	     call   *%r11; nop2;	// 5
931   *
932   */
933  
934  asm(	".pushsection .rodata			\n"
935  	"fineibt_preamble_start:		\n"
936  	"	endbr64				\n"
937  	"	subl	$0x12345678, %r10d	\n"
938  	"	je	fineibt_preamble_end	\n"
939  	"	ud2				\n"
940  	"	nop				\n"
941  	"fineibt_preamble_end:			\n"
942  	".popsection\n"
943  );
944  
945  extern u8 fineibt_preamble_start[];
946  extern u8 fineibt_preamble_end[];
947  
948  #define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
949  #define fineibt_preamble_hash 7
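/*
 * Illustrative note on the hash offsets: in the preamble the immediate sits
 * after the 4-byte ENDBR64 plus the 3 opcode/ModRM bytes of
 * "subl $imm32, %r10d" (41 81 ea), hence offset 7; in the caller sequence it
 * sits after the 2 opcode bytes of "movl $imm32, %r10d" (41 ba), hence
 * fineibt_caller_hash below is 2.
 */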
950  
951  asm(	".pushsection .rodata			\n"
952  	"fineibt_caller_start:			\n"
953  	"	movl	$0x12345678, %r10d	\n"
954  	"	sub	$16, %r11		\n"
955  	ASM_NOP4
956  	"fineibt_caller_end:			\n"
957  	".popsection				\n"
958  );
959  
960  extern u8 fineibt_caller_start[];
961  extern u8 fineibt_caller_end[];
962  
963  #define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
964  #define fineibt_caller_hash 2
965  
966  #define fineibt_caller_jmp (fineibt_caller_size - 2)
967  
968  static u32 decode_preamble_hash(void *addr)
969  {
970  	u8 *p = addr;
971  
972  	/* b8 78 56 34 12          mov    $0x12345678,%eax */
973  	if (p[0] == 0xb8)
974  		return *(u32 *)(addr + 1);
975  
976  	return 0; /* invalid hash value */
977  }
978  
979  static u32 decode_caller_hash(void *addr)
980  {
981  	u8 *p = addr;
982  
983  	/* 41 ba 78 56 34 12       mov    $0x12345678,%r10d */
984  	if (p[0] == 0x41 && p[1] == 0xba)
985  		return -*(u32 *)(addr + 2);
986  
987  	/* eb 0c 78 56 34 12	   jmp.d8  +12 */
988  	if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
989  		return -*(u32 *)(addr + 2);
990  
991  	return 0; /* invalid hash value */
992  }
993  
994  /* .retpoline_sites */
995  static int cfi_disable_callers(s32 *start, s32 *end)
996  {
997  	/*
998  	 * Disable kCFI by patching in a JMP.d8; this leaves the hash immediate
999  	 * intact for later usage. Also see decode_caller_hash() and
1000  	 * cfi_rewrite_callers().
1001  	 */
1002  	const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
1003  	s32 *s;
1004  
1005  	for (s = start; s < end; s++) {
1006  		void *addr = (void *)s + *s;
1007  		u32 hash;
1008  
1009  		addr -= fineibt_caller_size;
1010  		hash = decode_caller_hash(addr);
1011  		if (!hash) /* nocfi callers */
1012  			continue;
1013  
1014  		text_poke_early(addr, jmp, 2);
1015  	}
1016  
1017  	return 0;
1018  }
1019  
1020  static int cfi_enable_callers(s32 *start, s32 *end)
1021  {
1022  	/*
1023  	 * Re-enable kCFI, undo what cfi_disable_callers() did.
1024  	 */
1025  	const u8 mov[] = { 0x41, 0xba };
1026  	s32 *s;
1027  
1028  	for (s = start; s < end; s++) {
1029  		void *addr = (void *)s + *s;
1030  		u32 hash;
1031  
1032  		addr -= fineibt_caller_size;
1033  		hash = decode_caller_hash(addr);
1034  		if (!hash) /* nocfi callers */
1035  			continue;
1036  
1037  		text_poke_early(addr, mov, 2);
1038  	}
1039  
1040  	return 0;
1041  }
1042  
1043  /* .cfi_sites */
1044  static int cfi_rand_preamble(s32 *start, s32 *end)
1045  {
1046  	s32 *s;
1047  
1048  	for (s = start; s < end; s++) {
1049  		void *addr = (void *)s + *s;
1050  		u32 hash;
1051  
1052  		hash = decode_preamble_hash(addr);
1053  		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1054  			 addr, addr, 5, addr))
1055  			return -EINVAL;
1056  
1057  		hash = cfi_rehash(hash);
1058  		text_poke_early(addr + 1, &hash, 4);
1059  	}
1060  
1061  	return 0;
1062  }
1063  
1064  static int cfi_rewrite_preamble(s32 *start, s32 *end)
1065  {
1066  	s32 *s;
1067  
1068  	for (s = start; s < end; s++) {
1069  		void *addr = (void *)s + *s;
1070  		u32 hash;
1071  
1072  		hash = decode_preamble_hash(addr);
1073  		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1074  			 addr, addr, 5, addr))
1075  			return -EINVAL;
1076  
1077  		text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
1078  		WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
1079  		text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
1080  	}
1081  
1082  	return 0;
1083  }
1084  
1085  static void cfi_rewrite_endbr(s32 *start, s32 *end)
1086  {
1087  	s32 *s;
1088  
1089  	for (s = start; s < end; s++) {
1090  		void *addr = (void *)s + *s;
1091  
1092  		poison_endbr(addr+16, false);
1093  	}
1094  }
1095  
1096  /* .retpoline_sites */
1097  static int cfi_rand_callers(s32 *start, s32 *end)
1098  {
1099  	s32 *s;
1100  
1101  	for (s = start; s < end; s++) {
1102  		void *addr = (void *)s + *s;
1103  		u32 hash;
1104  
1105  		addr -= fineibt_caller_size;
1106  		hash = decode_caller_hash(addr);
1107  		if (hash) {
1108  			hash = -cfi_rehash(hash);
1109  			text_poke_early(addr + 2, &hash, 4);
1110  		}
1111  	}
1112  
1113  	return 0;
1114  }
1115  
1116  static int cfi_rewrite_callers(s32 *start, s32 *end)
1117  {
1118  	s32 *s;
1119  
1120  	for (s = start; s < end; s++) {
1121  		void *addr = (void *)s + *s;
1122  		u32 hash;
1123  
1124  		addr -= fineibt_caller_size;
1125  		hash = decode_caller_hash(addr);
1126  		if (hash) {
1127  			text_poke_early(addr, fineibt_caller_start, fineibt_caller_size);
1128  			WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678);
1129  			text_poke_early(addr + fineibt_caller_hash, &hash, 4);
1130  		}
1131  		/* rely on apply_retpolines() */
1132  	}
1133  
1134  	return 0;
1135  }
1136  
1137  static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1138  			    s32 *start_cfi, s32 *end_cfi, bool builtin)
1139  {
1140  	int ret;
1141  
1142  	if (WARN_ONCE(fineibt_preamble_size != 16,
1143  		      "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
1144  		return;
1145  
1146  	if (cfi_mode == CFI_DEFAULT) {
1147  		cfi_mode = CFI_KCFI;
1148  		if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
1149  			cfi_mode = CFI_FINEIBT;
1150  	}
1151  
1152  	/*
1153  	 * Rewrite the callers to not use the __cfi_ stubs, such that we might
1154  	 * rewrite them. This disables all CFI. If this succeeds but any of the
1155  	 * later stages fails, we're without CFI.
1156  	 */
1157  	ret = cfi_disable_callers(start_retpoline, end_retpoline);
1158  	if (ret)
1159  		goto err;
1160  
1161  	if (cfi_rand) {
1162  		if (builtin)
1163  			cfi_seed = get_random_u32();
1164  
1165  		ret = cfi_rand_preamble(start_cfi, end_cfi);
1166  		if (ret)
1167  			goto err;
1168  
1169  		ret = cfi_rand_callers(start_retpoline, end_retpoline);
1170  		if (ret)
1171  			goto err;
1172  	}
1173  
1174  	switch (cfi_mode) {
1175  	case CFI_OFF:
1176  		if (builtin)
1177  			pr_info("Disabling CFI\n");
1178  		return;
1179  
1180  	case CFI_KCFI:
1181  		ret = cfi_enable_callers(start_retpoline, end_retpoline);
1182  		if (ret)
1183  			goto err;
1184  
1185  		if (builtin)
1186  			pr_info("Using kCFI\n");
1187  		return;
1188  
1189  	case CFI_FINEIBT:
1190  		/* place the FineIBT preamble at func()-16 */
1191  		ret = cfi_rewrite_preamble(start_cfi, end_cfi);
1192  		if (ret)
1193  			goto err;
1194  
1195  		/* rewrite the callers to target func()-16 */
1196  		ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
1197  		if (ret)
1198  			goto err;
1199  
1200  		/* now that nobody targets func()+0, remove ENDBR there */
1201  		cfi_rewrite_endbr(start_cfi, end_cfi);
1202  
1203  		if (builtin)
1204  			pr_info("Using FineIBT CFI\n");
1205  		return;
1206  
1207  	default:
1208  		break;
1209  	}
1210  
1211  err:
1212  	pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
1213  }
1214  
1215  static inline void poison_hash(void *addr)
1216  {
1217  	*(u32 *)addr = 0;
1218  }
1219  
1220  static void poison_cfi(void *addr)
1221  {
1222  	switch (cfi_mode) {
1223  	case CFI_FINEIBT:
1224  		/*
1225  		 * __cfi_\func:
1226  		 *	osp nopl (%rax)
1227  		 *	subl	$0, %r10d
1228  		 *	jz	1f
1229  		 *	ud2
1230  		 * 1:	nop
1231  		 */
1232  		poison_endbr(addr, false);
1233  		poison_hash(addr + fineibt_preamble_hash);
1234  		break;
1235  
1236  	case CFI_KCFI:
1237  		/*
1238  		 * __cfi_\func:
1239  		 *	movl	$0, %eax
1240  		 *	.skip	11, 0x90
1241  		 */
1242  		poison_hash(addr + 1);
1243  		break;
1244  
1245  	default:
1246  		break;
1247  	}
1248  }
1249  
1250  #else
1251  
1252  static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1253  			    s32 *start_cfi, s32 *end_cfi, bool builtin)
1254  {
1255  }
1256  
1257  #ifdef CONFIG_X86_KERNEL_IBT
1258  static void poison_cfi(void *addr) { }
1259  #endif
1260  
1261  #endif
1262  
1263  void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1264  		   s32 *start_cfi, s32 *end_cfi)
1265  {
1266  	return __apply_fineibt(start_retpoline, end_retpoline,
1267  			       start_cfi, end_cfi,
1268  			       /* .builtin = */ false);
1269  }
1270  
1271  #ifdef CONFIG_SMP
1272  static void alternatives_smp_lock(const s32 *start, const s32 *end,
1273  				  u8 *text, u8 *text_end)
1274  {
1275  	const s32 *poff;
1276  
1277  	for (poff = start; poff < end; poff++) {
1278  		u8 *ptr = (u8 *)poff + *poff;
1279  
1280  		if (!*poff || ptr < text || ptr >= text_end)
1281  			continue;
1282  		/* turn DS segment override prefix into lock prefix */
1283  		if (*ptr == 0x3e)
1284  			text_poke(ptr, ((unsigned char []){0xf0}), 1);
1285  	}
1286  }
1287  
1288  static void alternatives_smp_unlock(const s32 *start, const s32 *end,
1289  				    u8 *text, u8 *text_end)
1290  {
1291  	const s32 *poff;
1292  
1293  	for (poff = start; poff < end; poff++) {
1294  		u8 *ptr = (u8 *)poff + *poff;
1295  
1296  		if (!*poff || ptr < text || ptr >= text_end)
1297  			continue;
1298  		/* turn lock prefix into DS segment override prefix */
1299  		if (*ptr == 0xf0)
1300  			text_poke(ptr, ((unsigned char []){0x3E}), 1);
1301  	}
1302  }
1303  
1304  struct smp_alt_module {
1305  	/* what is this ??? */
1306  	struct module	*mod;
1307  	char		*name;
1308  
1309  	/* ptrs to lock prefixes */
1310  	const s32	*locks;
1311  	const s32	*locks_end;
1312  
1313  	/* .text segment, needed to avoid patching init code ;) */
1314  	u8		*text;
1315  	u8		*text_end;
1316  
1317  	struct list_head next;
1318  };
1319  static LIST_HEAD(smp_alt_modules);
1320  static bool uniproc_patched = false;	/* protected by text_mutex */
1321  
1322  void __init_or_module alternatives_smp_module_add(struct module *mod,
1323  						  char *name,
1324  						  void *locks, void *locks_end,
1325  						  void *text,  void *text_end)
1326  {
1327  	struct smp_alt_module *smp;
1328  
1329  	mutex_lock(&text_mutex);
1330  	if (!uniproc_patched)
1331  		goto unlock;
1332  
1333  	if (num_possible_cpus() == 1)
1334  		/* Don't bother remembering, we'll never have to undo it. */
1335  		goto smp_unlock;
1336  
1337  	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
1338  	if (NULL == smp)
1339  		/* we'll run the (safe but slow) SMP code then ... */
1340  		goto unlock;
1341  
1342  	smp->mod	= mod;
1343  	smp->name	= name;
1344  	smp->locks	= locks;
1345  	smp->locks_end	= locks_end;
1346  	smp->text	= text;
1347  	smp->text_end	= text_end;
1348  	DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s",
1349  		smp->locks, smp->locks_end,
1350  		smp->text, smp->text_end, smp->name);
1351  
1352  	list_add_tail(&smp->next, &smp_alt_modules);
1353  smp_unlock:
1354  	alternatives_smp_unlock(locks, locks_end, text, text_end);
1355  unlock:
1356  	mutex_unlock(&text_mutex);
1357  }
1358  
1359  void __init_or_module alternatives_smp_module_del(struct module *mod)
1360  {
1361  	struct smp_alt_module *item;
1362  
1363  	mutex_lock(&text_mutex);
1364  	list_for_each_entry(item, &smp_alt_modules, next) {
1365  		if (mod != item->mod)
1366  			continue;
1367  		list_del(&item->next);
1368  		kfree(item);
1369  		break;
1370  	}
1371  	mutex_unlock(&text_mutex);
1372  }
1373  
1374  void alternatives_enable_smp(void)
1375  {
1376  	struct smp_alt_module *mod;
1377  
1378  	/* Why bother if there are no other CPUs? */
1379  	BUG_ON(num_possible_cpus() == 1);
1380  
1381  	mutex_lock(&text_mutex);
1382  
1383  	if (uniproc_patched) {
1384  		pr_info("switching to SMP code\n");
1385  		BUG_ON(num_online_cpus() != 1);
1386  		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
1387  		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
1388  		list_for_each_entry(mod, &smp_alt_modules, next)
1389  			alternatives_smp_lock(mod->locks, mod->locks_end,
1390  					      mod->text, mod->text_end);
1391  		uniproc_patched = false;
1392  	}
1393  	mutex_unlock(&text_mutex);
1394  }
1395  
1396  /*
1397   * Return 1 if the address range is reserved for SMP-alternatives.
1398   * Must hold text_mutex.
1399   */
1400  int alternatives_text_reserved(void *start, void *end)
1401  {
1402  	struct smp_alt_module *mod;
1403  	const s32 *poff;
1404  	u8 *text_start = start;
1405  	u8 *text_end = end;
1406  
1407  	lockdep_assert_held(&text_mutex);
1408  
1409  	list_for_each_entry(mod, &smp_alt_modules, next) {
1410  		if (mod->text > text_end || mod->text_end < text_start)
1411  			continue;
1412  		for (poff = mod->locks; poff < mod->locks_end; poff++) {
1413  			const u8 *ptr = (const u8 *)poff + *poff;
1414  
1415  			if (text_start <= ptr && text_end > ptr)
1416  				return 1;
1417  		}
1418  	}
1419  
1420  	return 0;
1421  }
1422  #endif /* CONFIG_SMP */
1423  
1424  #ifdef CONFIG_PARAVIRT
1425  
1426  /* Use this to add nops to a buffer, then text_poke the whole buffer. */
1427  static void __init_or_module add_nops(void *insns, unsigned int len)
1428  {
1429  	while (len > 0) {
1430  		unsigned int noplen = len;
1431  		if (noplen > ASM_NOP_MAX)
1432  			noplen = ASM_NOP_MAX;
1433  		memcpy(insns, x86_nops[noplen], noplen);
1434  		insns += noplen;
1435  		len -= noplen;
1436  	}
1437  }
1438  
1439  void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
1440  				     struct paravirt_patch_site *end)
1441  {
1442  	struct paravirt_patch_site *p;
1443  	char insn_buff[MAX_PATCH_LEN];
1444  
1445  	for (p = start; p < end; p++) {
1446  		unsigned int used;
1447  
1448  		BUG_ON(p->len > MAX_PATCH_LEN);
1449  		/* prep the buffer with the original instructions */
1450  		memcpy(insn_buff, p->instr, p->len);
1451  		used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
1452  
1453  		BUG_ON(used > p->len);
1454  
1455  		/* Pad the rest with nops */
1456  		add_nops(insn_buff + used, p->len - used);
1457  		text_poke_early(p->instr, insn_buff, p->len);
1458  	}
1459  }
1460  extern struct paravirt_patch_site __start_parainstructions[],
1461  	__stop_parainstructions[];
1462  #endif	/* CONFIG_PARAVIRT */
1463  
1464  /*
1465   * Self-test for the INT3 based CALL emulation code.
1466   *
1467   * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
1468   * properly and that there is a stack gap between the INT3 frame and the
1469   * previous context. Without this gap doing a virtual PUSH on the interrupted
1470   * stack would corrupt the INT3 IRET frame.
1471   *
1472   * See entry_{32,64}.S for more details.
1473   */
1474  
1475  /*
1476   * We define the int3_magic() function in assembly to control the calling
1477   * convention such that we can 'call' it from assembly.
1478   */
1479  
1480  extern void int3_magic(unsigned int *ptr); /* defined in asm */
1481  
1482  asm (
1483  "	.pushsection	.init.text, \"ax\", @progbits\n"
1484  "	.type		int3_magic, @function\n"
1485  "int3_magic:\n"
1486  	ANNOTATE_NOENDBR
1487  "	movl	$1, (%" _ASM_ARG1 ")\n"
1488  	ASM_RET
1489  "	.size		int3_magic, .-int3_magic\n"
1490  "	.popsection\n"
1491  );
1492  
1493  extern void int3_selftest_ip(void); /* defined in asm below */
1494  
1495  static int __init
1496  int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
1497  {
1498  	unsigned long selftest = (unsigned long)&int3_selftest_ip;
1499  	struct die_args *args = data;
1500  	struct pt_regs *regs = args->regs;
1501  
1502  	OPTIMIZER_HIDE_VAR(selftest);
1503  
1504  	if (!regs || user_mode(regs))
1505  		return NOTIFY_DONE;
1506  
1507  	if (val != DIE_INT3)
1508  		return NOTIFY_DONE;
1509  
1510  	if (regs->ip - INT3_INSN_SIZE != selftest)
1511  		return NOTIFY_DONE;
1512  
1513  	int3_emulate_call(regs, (unsigned long)&int3_magic);
1514  	return NOTIFY_STOP;
1515  }
1516  
1517  /* Must be noinline to ensure uniqueness of int3_selftest_ip. */
1518  static noinline void __init int3_selftest(void)
1519  {
1520  	static __initdata struct notifier_block int3_exception_nb = {
1521  		.notifier_call	= int3_exception_notify,
1522  		.priority	= INT_MAX-1, /* last */
1523  	};
1524  	unsigned int val = 0;
1525  
1526  	BUG_ON(register_die_notifier(&int3_exception_nb));
1527  
1528  	/*
1529  	 * Basically: int3_magic(&val); but really complicated :-)
1530  	 *
1531  	 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
1532  	 * notifier above will emulate CALL for us.
1533  	 */
1534  	asm volatile ("int3_selftest_ip:\n\t"
1535  		      ANNOTATE_NOENDBR
1536  		      "    int3; nop; nop; nop; nop\n\t"
1537  		      : ASM_CALL_CONSTRAINT
1538  		      : __ASM_SEL_RAW(a, D) (&val)
1539  		      : "memory");
1540  
1541  	BUG_ON(val != 1);
1542  
1543  	unregister_die_notifier(&int3_exception_nb);
1544  }
1545  
1546  static __initdata int __alt_reloc_selftest_addr;
1547  
1548  extern void __init __alt_reloc_selftest(void *arg);
1549  __visible noinline void __init __alt_reloc_selftest(void *arg)
1550  {
1551  	WARN_ON(arg != &__alt_reloc_selftest_addr);
1552  }
1553  
1554  static noinline void __init alt_reloc_selftest(void)
1555  {
1556  	/*
1557  	 * Tests apply_relocation().
1558  	 *
1559  	 * This has a relative immediate (CALL) in a place other than the first
1560  	 * instruction and additionally on x86_64 we get a RIP-relative LEA:
1561  	 *
1562  	 *   lea    0x0(%rip),%rdi  # 5d0: R_X86_64_PC32    .init.data+0x5566c
1563  	 *   call   +0              # 5d5: R_X86_64_PLT32   __alt_reloc_selftest-0x4
1564  	 *
1565  	 * Getting this wrong will either crash and burn or tickle the WARN
1566  	 * above.
1567  	 */
1568  	asm_inline volatile (
1569  		ALTERNATIVE("", "lea %[mem], %%" _ASM_ARG1 "; call __alt_reloc_selftest;", X86_FEATURE_ALWAYS)
1570  		: /* output */
1571  		: [mem] "m" (__alt_reloc_selftest_addr)
1572  		: _ASM_ARG1
1573  	);
1574  }
1575  
1576  void __init alternative_instructions(void)
1577  {
1578  	int3_selftest();
1579  
1580  	/*
1581  	 * The patching is not fully atomic, so try to avoid local
1582  	 * interruptions that might execute the to-be-patched code.
1583  	 * Other CPUs are not running.
1584  	 */
1585  	stop_nmi();
1586  
1587  	/*
1588  	 * Don't stop machine check exceptions while patching.
1589  	 * MCEs only happen when something got corrupted and in this
1590  	 * case we must do something about the corruption.
1591  	 * Ignoring it is worse than an unlikely patching race.
1592  	 * Also machine checks tend to be broadcast and if one CPU
1593  	 * goes into machine check the others follow quickly, so we don't
1594   * expect a machine check to cause undue problems during code
1595  	 * patching.
1596  	 */
1597  
1598  	/*
1599  	 * Paravirt patching and alternative patching can be combined to
1600  	 * replace a function call with a short direct code sequence (e.g.
1601  	 * by setting a constant return value instead of doing that in an
1602  	 * external function).
1603  	 * In order to make this work the following sequence is required:
1604  	 * 1. set (artificial) features depending on used paravirt
1605  	 *    functions which can later influence alternative patching
1606  	 * 2. apply paravirt patching (generally replacing an indirect
1607  	 *    function call with a direct one)
1608  	 * 3. apply alternative patching (e.g. replacing a direct function
1609  	 *    call with a custom code sequence)
1610  	 * Doing paravirt patching after alternative patching would clobber
1611  	 * the optimization of the custom code with a function call again.
1612  	 */
1613  	paravirt_set_cap();
1614  
1615  	/*
1616  	 * First patch paravirt functions, such that we overwrite the indirect
1617  	 * call with the direct call.
1618  	 */
1619  	apply_paravirt(__parainstructions, __parainstructions_end);
1620  
1621  	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
1622  			__cfi_sites, __cfi_sites_end, true);
1623  
1624  	/*
1625  	 * Rewrite the retpolines, must be done before alternatives since
1626  	 * those can rewrite the retpoline thunks.
1627  	 */
1628  	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
1629  	apply_returns(__return_sites, __return_sites_end);
1630  
1631  	/*
1632  	 * Then patch alternatives, such that those paravirt calls that are in
1633  	 * alternatives can be overwritten by their immediate fragments.
1634  	 */
1635  	apply_alternatives(__alt_instructions, __alt_instructions_end);
1636  
1637  	/*
1638  	 * Now all calls are established. Apply the call thunks if
1639  	 * required.
1640  	 */
1641  	callthunks_patch_builtin_calls();
1642  
1643  	/*
1644  	 * Seal all functions that do not have their address taken.
1645  	 */
1646  	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
1647  
1648  #ifdef CONFIG_SMP
1649  	/* Patch to UP if other cpus not imminent. */
1650  	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
1651  		uniproc_patched = true;
1652  		alternatives_smp_module_add(NULL, "core kernel",
1653  					    __smp_locks, __smp_locks_end,
1654  					    _text, _etext);
1655  	}
1656  
1657  	if (!uniproc_patched || num_possible_cpus() == 1) {
1658  		free_init_pages("SMP alternatives",
1659  				(unsigned long)__smp_locks,
1660  				(unsigned long)__smp_locks_end);
1661  	}
1662  #endif
1663  
1664  	restart_nmi();
1665  	alternatives_patched = 1;
1666  
1667  	alt_reloc_selftest();
1668  }
1669  
1670  /**
1671   * text_poke_early - Update instructions on a live kernel at boot time
1672   * @addr: address to modify
1673   * @opcode: source of the copy
1674   * @len: length to copy
1675   *
1676   * When you use this code to patch more than one byte of an instruction
1677   * you need to make sure that other CPUs cannot execute this code in parallel.
1678   * Also no thread must be currently preempted in the middle of these
1679   * instructions. And on the local CPU you need to be protected against NMI or
1680   * MCE handlers seeing an inconsistent instruction while you patch.
1681   */
1682  void __init_or_module text_poke_early(void *addr, const void *opcode,
1683  				      size_t len)
1684  {
1685  	unsigned long flags;
1686  
1687  	if (boot_cpu_has(X86_FEATURE_NX) &&
1688  	    is_module_text_address((unsigned long)addr)) {
1689  		/*
1690  		 * Module text is initially marked non-executable, so the
1691  		 * code cannot be running and speculative code-fetches are
1692  		 * prevented. Just change the code.
1693  		 */
1694  		memcpy(addr, opcode, len);
1695  	} else {
1696  		local_irq_save(flags);
1697  		memcpy(addr, opcode, len);
1698  		sync_core();
1699  		local_irq_restore(flags);
1700  
1701  		/*
1702  		 * Could also do a CLFLUSH here to speed up CPU recovery; but
1703  		 * that causes hangs on some VIA CPUs.
1704  		 */
1705  	}
1706  }
1707  
1708  typedef struct {
1709  	struct mm_struct *mm;
1710  } temp_mm_state_t;
1711  
1712  /*
1713   * Using a temporary mm allows setting temporary mappings that are not accessible
1714   * by other CPUs. Such mappings are needed to perform sensitive memory writes
1715   * that override the kernel memory protections (e.g., W^X), without exposing the
1716   * temporary page-table mappings that are required for these write operations to
1717   * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
1718   * mapping is torn down.
1719   *
1720   * Context: The temporary mm needs to be used exclusively by a single core. To
1721   *          harden security IRQs must be disabled while the temporary mm is
1722   *          loaded, thereby preventing interrupt handler bugs from overriding
1723   *          the kernel memory protection.
1724   */
1725  static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
1726  {
1727  	temp_mm_state_t temp_state;
1728  
1729  	lockdep_assert_irqs_disabled();
1730  
1731  	/*
1732  	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
1733  	 * with a stale address space WITHOUT being in lazy mode after
1734  	 * restoring the previous mm.
1735  	 */
1736  	if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
1737  		leave_mm(smp_processor_id());
1738  
1739  	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1740  	switch_mm_irqs_off(NULL, mm, current);
1741  
1742  	/*
1743  	 * If breakpoints are enabled, disable them while the temporary mm is
1744  	 * used. Userspace might set up watchpoints on addresses that are used
1745  	 * in the temporary mm, which would lead to wrong signals being sent or
1746  	 * crashes.
1747  	 *
1748  	 * Note that breakpoints are not disabled selectively, which also causes
1749  	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
1750  	 * undesirable, but still seems reasonable as the code that runs in the
1751  	 * temporary mm should be short.
1752  	 */
1753  	if (hw_breakpoint_active())
1754  		hw_breakpoint_disable();
1755  
1756  	return temp_state;
1757  }
1758  
1759  static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
1760  {
1761  	lockdep_assert_irqs_disabled();
1762  	switch_mm_irqs_off(NULL, prev_state.mm, current);
1763  
1764  	/*
1765  	 * Restore the breakpoints if they were disabled before the temporary mm
1766  	 * was loaded.
1767  	 */
1768  	if (hw_breakpoint_active())
1769  		hw_breakpoint_restore();
1770  }
1771  
1772  __ro_after_init struct mm_struct *poking_mm;
1773  __ro_after_init unsigned long poking_addr;
1774  
1775  static void text_poke_memcpy(void *dst, const void *src, size_t len)
1776  {
1777  	memcpy(dst, src, len);
1778  }
1779  
1780  static void text_poke_memset(void *dst, const void *src, size_t len)
1781  {
1782  	int c = *(const int *)src;
1783  
1784  	memset(dst, c, len);
1785  }
1786  
1787  typedef void text_poke_f(void *dst, const void *src, size_t len);
1788  
1789  static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len)
1790  {
1791  	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
1792  	struct page *pages[2] = {NULL};
1793  	temp_mm_state_t prev;
1794  	unsigned long flags;
1795  	pte_t pte, *ptep;
1796  	spinlock_t *ptl;
1797  	pgprot_t pgprot;
1798  
1799  	/*
1800  	 * While boot memory allocator is running we cannot use struct pages as
1801  	 * they are not yet initialized. There is no way to recover.
1802  	 */
1803  	BUG_ON(!after_bootmem);
1804  
1805  	if (!core_kernel_text((unsigned long)addr)) {
1806  		pages[0] = vmalloc_to_page(addr);
1807  		if (cross_page_boundary)
1808  			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1809  	} else {
1810  		pages[0] = virt_to_page(addr);
1811  		WARN_ON(!PageReserved(pages[0]));
1812  		if (cross_page_boundary)
1813  			pages[1] = virt_to_page(addr + PAGE_SIZE);
1814  	}
1815  	/*
1816  	 * If something went wrong, crash and burn since recovery paths are not
1817  	 * implemented.
1818  	 */
1819  	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
1820  
1821  	/*
1822  	 * Map the page without the global bit, as TLB flushing is done with
1823  	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
1824  	 */
1825  	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
1826  
1827  	/*
1828  	 * The lock is not really needed, but taking it lets us avoid open-coding.
1829  	 */
1830  	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
1831  
1832  	/*
1833  	 * This must not fail; preallocated in poking_init().
1834  	 */
1835  	VM_BUG_ON(!ptep);
1836  
1837  	local_irq_save(flags);
1838  
1839  	pte = mk_pte(pages[0], pgprot);
1840  	set_pte_at(poking_mm, poking_addr, ptep, pte);
1841  
1842  	if (cross_page_boundary) {
1843  		pte = mk_pte(pages[1], pgprot);
1844  		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
1845  	}
1846  
1847  	/*
1848  	 * Loading the temporary mm behaves as a compiler barrier, which
1849  	 * guarantees that the PTE will be set by the time the write is done.
1850  	 */
1851  	prev = use_temporary_mm(poking_mm);
1852  
1853  	kasan_disable_current();
1854  	func((u8 *)poking_addr + offset_in_page(addr), src, len);
1855  	kasan_enable_current();
1856  
1857  	/*
1858  	 * Use a compiler barrier to ensure that the PTE is only cleared after
1859  	 * the write instructions have been issued.
1860  	 */
1861  	barrier();
1862  
1863  	pte_clear(poking_mm, poking_addr, ptep);
1864  	if (cross_page_boundary)
1865  		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
1866  
1867  	/*
1868  	 * Loading the previous page-table hierarchy requires a serializing
1869  	 * instruction that already allows the core to see the updated version.
1870  	 * Xen-PV is assumed to serialize execution in a similar manner.
1871  	 */
1872  	unuse_temporary_mm(prev);
1873  
1874  	/*
1875  	 * Flushing the TLB might involve IPIs, which would require enabled
1876  	 * IRQs, but not if the mm is not used, as is the case at this point.
1877  	 */
1878  	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
1879  			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
1880  			   PAGE_SHIFT, false);
1881  
1882  	if (func == text_poke_memcpy) {
1883  		/*
1884  		 * If the text does not match what we just wrote then something is
1885  		 * fundamentally screwy; there's nothing we can really do about that.
1886  		 */
1887  		BUG_ON(memcmp(addr, src, len));
1888  	}
1889  
1890  	local_irq_restore(flags);
1891  	pte_unmap_unlock(ptep, ptl);
1892  	return addr;
1893  }
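
/*
 * Worked example of the mapping above (illustrative address): for
 * addr = 0xffffffffc0a01ffc and len = 6, offset_in_page(addr) = 0xffc and
 * 0xffc + 6 > PAGE_SIZE, so cross_page_boundary is true and two PTEs are
 * installed. The write then lands at poking_addr + 0xffc, with the last
 * two bytes spilling into the second mapped page; the kernel mapping of
 * the text itself is never made writable.
 */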
1894  
1895  /**
1896   * text_poke - Update instructions on a live kernel
1897   * @addr: address to modify
1898   * @opcode: source of the copy
1899   * @len: length to copy
1900   *
1901   * Only atomic text poke/set should be allowed when not doing early patching.
1902   * It means the size must be writable atomically and the address must be aligned
1903   * in a way that permits an atomic write. It also makes sure we fit on a single
1904   * page.
1905   *
1906   * Note that the caller must ensure that if the modified code is part of a
1907   * module, the module would not be removed during poking. This can be achieved
1908   * by registering a module notifier, and ordering module removal and patching
1909   * trough a mutex.
1910   */
1911  void *text_poke(void *addr, const void *opcode, size_t len)
1912  {
1913  	lockdep_assert_held(&text_mutex);
1914  
1915  	return __text_poke(text_poke_memcpy, addr, opcode, len);
1916  }
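
/*
 * Hedged sketch of the module-removal ordering described in the comment
 * above: serialize patching and module removal with a caller-private mutex
 * taken both from a module notifier and around text_poke(). All names here
 * (my_patch_mutex, my_module_notify, my_patch_one) are hypothetical; this
 * is not an API provided by this file.
 */
#if 0	/* illustrative only, not built */
static DEFINE_MUTEX(my_patch_mutex);

static int my_module_notify(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	if (action == MODULE_STATE_GOING) {
		mutex_lock(&my_patch_mutex);
		/* drop/forget any patch sites inside the departing module */
		mutex_unlock(&my_patch_mutex);
	}
	return NOTIFY_OK;
}

static void my_patch_one(void *site, const void *insn, size_t len)
{
	mutex_lock(&my_patch_mutex);
	mutex_lock(&text_mutex);
	text_poke(site, insn, len);
	mutex_unlock(&text_mutex);
	mutex_unlock(&my_patch_mutex);
}
#endif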
1917  
1918  /**
1919   * text_poke_kgdb - Update instructions on a live kernel by kgdb
1920   * @addr: address to modify
1921   * @opcode: source of the copy
1922   * @len: length to copy
1923   *
1924   * Only atomic text poke/set should be allowed when not doing early patching.
1925   * It means the size must be writable atomically and the address must be aligned
1926   * in a way that permits an atomic write. It also makes sure we fit on a single
1927   * page.
1928   *
1929   * Context: should only be used by kgdb, which ensures no other core is running,
1930   *	    despite the fact it does not hold the text_mutex.
1931   */
1932  void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
1933  {
1934  	return __text_poke(text_poke_memcpy, addr, opcode, len);
1935  }
1936  
1937  void *text_poke_copy_locked(void *addr, const void *opcode, size_t len,
1938  			    bool core_ok)
1939  {
1940  	unsigned long start = (unsigned long)addr;
1941  	size_t patched = 0;
1942  
1943  	if (WARN_ON_ONCE(!core_ok && core_kernel_text(start)))
1944  		return NULL;
1945  
1946  	while (patched < len) {
1947  		unsigned long ptr = start + patched;
1948  		size_t s;
1949  
1950  		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
1951  
1952  		__text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
1953  		patched += s;
1954  	}
1955  	return addr;
1956  }
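
/*
 * Worked example of the chunking above (illustrative numbers): for a start
 * address with offset_in_page() == 0x800 and len == 0x3000, the first
 * __text_poke() call copies 2 * PAGE_SIZE - 0x800 = 0x1800 bytes, exactly
 * filling the two-page poking window, and the second call copies the
 * remaining 0x1800 bytes starting page aligned.
 */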
1957  
1958  /**
1959   * text_poke_copy - Copy instructions into (an unused part of) RX memory
1960   * @addr: address to modify
1961   * @opcode: source of the copy
1962   * @len: length to copy, could be more than 2x PAGE_SIZE
1963   *
1964   * Not safe against concurrent execution; useful for JITs to dump
1965   * new code blocks into unused regions of RX memory. Can be used in
1966   * conjunction with synchronize_rcu_tasks() to wait for existing
1967   * execution to quiesce after having made sure no existing function
1968   * pointers are live.
1969   */
1970  void *text_poke_copy(void *addr, const void *opcode, size_t len)
1971  {
1972  	mutex_lock(&text_mutex);
1973  	addr = text_poke_copy_locked(addr, opcode, len, false);
1974  	mutex_unlock(&text_mutex);
1975  	return addr;
1976  }
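
/*
 * Hedged example of the JIT-style use described above ("image" is a
 * hypothetical, already executable allocation): once no function pointers
 * into the old contents remain, wait for in-flight execution to quiesce
 * and then reuse the region for a new code blob.
 */
#if 0	/* illustrative only, not built */
static void *example_jit_reuse(void *image, const void *blob, size_t size)
{
	/* caller has already dropped every pointer into the old contents */
	synchronize_rcu_tasks();

	return text_poke_copy(image, blob, size);
}
#endif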
1977  
1978  /**
1979   * text_poke_set - memset into (an unused part of) RX memory
1980   * @addr: address to modify
1981   * @c: the byte to fill the area with
1982   * @len: length to copy, could be more than 2x PAGE_SIZE
1983   *
1984   * This is useful to overwrite unused regions of RX memory with illegal
1985   * instructions.
1986   */
1987  void *text_poke_set(void *addr, int c, size_t len)
1988  {
1989  	unsigned long start = (unsigned long)addr;
1990  	size_t patched = 0;
1991  
1992  	if (WARN_ON_ONCE(core_kernel_text(start)))
1993  		return NULL;
1994  
1995  	mutex_lock(&text_mutex);
1996  	while (patched < len) {
1997  		unsigned long ptr = start + patched;
1998  		size_t s;
1999  
2000  		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2001  
2002  		__text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
2003  		patched += s;
2004  	}
2005  	mutex_unlock(&text_mutex);
2006  	return addr;
2007  }
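
/*
 * Hedged example of the use case mentioned above ("image", "used" and
 * "size" are hypothetical): poison the unused tail of an RX allocation
 * with INT3 so a stray jump traps instead of executing leftover bytes.
 */
#if 0	/* illustrative only, not built */
static void example_poison_tail(void *image, size_t used, size_t size)
{
	if (used < size)
		text_poke_set(image + used, INT3_INSN_OPCODE, size - used);
}
#endif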
2008  
2009  static void do_sync_core(void *info)
2010  {
2011  	sync_core();
2012  }
2013  
2014  void text_poke_sync(void)
2015  {
2016  	on_each_cpu(do_sync_core, NULL, 1);
2017  }
2018  
2019  /*
2020   * NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of
2021   * this thing. When len == 6 everything is prefixed with 0x0f and we map
2022   * opcode to Jcc.d8, using len to distinguish.
2023   */
2024  struct text_poke_loc {
2025  	/* addr := _stext + rel_addr */
2026  	s32 rel_addr;
2027  	s32 disp;
2028  	u8 len;
2029  	u8 opcode;
2030  	const u8 text[POKE_MAX_OPCODE_SIZE];
2031  	/* see text_poke_bp_batch() */
2032  	u8 old;
2033  };
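
/*
 * Concrete example of the scheme above, worked out from the layout rather
 * than from any particular call site: a 6 byte Jcc.d32 such as
 * "jnz rel32" is encoded as 0f 85 <imm32>. text_poke_loc_init() strips the
 * leading 0x0f, stores the remaining 5 bytes in ->text[], records
 * ->len = 6 and maps ->opcode to the short form 0x75 (Jcc.d8). The writers
 * in text_poke_bp_batch() re-insert the 0x0f prefix when len == 6, and
 * poke_int3_handler() emulates via the 0x70..0x7f (Jcc.d8) opcode range.
 */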
2034  
2035  struct bp_patching_desc {
2036  	struct text_poke_loc *vec;
2037  	int nr_entries;
2038  	atomic_t refs;
2039  };
2040  
2041  static struct bp_patching_desc bp_desc;
2042  
2043  static __always_inline
2044  struct bp_patching_desc *try_get_desc(void)
2045  {
2046  	struct bp_patching_desc *desc = &bp_desc;
2047  
2048  	if (!raw_atomic_inc_not_zero(&desc->refs))
2049  		return NULL;
2050  
2051  	return desc;
2052  }
2053  
2054  static __always_inline void put_desc(void)
2055  {
2056  	struct bp_patching_desc *desc = &bp_desc;
2057  
2058  	smp_mb__before_atomic();
2059  	raw_atomic_dec(&desc->refs);
2060  }
2061  
2062  static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
2063  {
2064  	return _stext + tp->rel_addr;
2065  }
2066  
2067  static __always_inline int patch_cmp(const void *key, const void *elt)
2068  {
2069  	struct text_poke_loc *tp = (struct text_poke_loc *) elt;
2070  
2071  	if (key < text_poke_addr(tp))
2072  		return -1;
2073  	if (key > text_poke_addr(tp))
2074  		return 1;
2075  	return 0;
2076  }
2077  
2078  noinstr int poke_int3_handler(struct pt_regs *regs)
2079  {
2080  	struct bp_patching_desc *desc;
2081  	struct text_poke_loc *tp;
2082  	int ret = 0;
2083  	void *ip;
2084  
2085  	if (user_mode(regs))
2086  		return 0;
2087  
2088  	/*
2089  	 * Having observed our INT3 instruction, we now must observe
2090  	 * bp_desc with non-zero refcount:
2091  	 *
2092  	 *	bp_desc.refs = 1		INT3
2093  	 *	WMB				RMB
2094  	 *	write INT3			if (bp_desc.refs != 0)
2095  	 */
2096  	smp_rmb();
2097  
2098  	desc = try_get_desc();
2099  	if (!desc)
2100  		return 0;
2101  
2102  	/*
2103  	 * Discount the INT3. See text_poke_bp_batch().
2104  	 */
2105  	ip = (void *) regs->ip - INT3_INSN_SIZE;
2106  
2107  	/*
2108  	 * Skip the binary search if there is a single member in the vector.
2109  	 */
2110  	if (unlikely(desc->nr_entries > 1)) {
2111  		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
2112  				      sizeof(struct text_poke_loc),
2113  				      patch_cmp);
2114  		if (!tp)
2115  			goto out_put;
2116  	} else {
2117  		tp = desc->vec;
2118  		if (text_poke_addr(tp) != ip)
2119  			goto out_put;
2120  	}
2121  
2122  	ip += tp->len;
2123  
2124  	switch (tp->opcode) {
2125  	case INT3_INSN_OPCODE:
2126  		/*
2127  		 * Someone poked an explicit INT3, they'll want to handle it,
2128  		 * do not consume.
2129  		 */
2130  		goto out_put;
2131  
2132  	case RET_INSN_OPCODE:
2133  		int3_emulate_ret(regs);
2134  		break;
2135  
2136  	case CALL_INSN_OPCODE:
2137  		int3_emulate_call(regs, (long)ip + tp->disp);
2138  		break;
2139  
2140  	case JMP32_INSN_OPCODE:
2141  	case JMP8_INSN_OPCODE:
2142  		int3_emulate_jmp(regs, (long)ip + tp->disp);
2143  		break;
2144  
2145  	case 0x70 ... 0x7f: /* Jcc */
2146  		int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
2147  		break;
2148  
2149  	default:
2150  		BUG();
2151  	}
2152  
2153  	ret = 1;
2154  
2155  out_put:
2156  	put_desc();
2157  	return ret;
2158  }
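
/*
 * Worked example of the address arithmetic above (illustrative numbers):
 * a 5 byte CALL is being patched at 0xffffffff81000100 with tp->disp = 0x40.
 * The CPU traps on the INT3 with regs->ip just past it, 0xffffffff81000101;
 * subtracting INT3_INSN_SIZE recovers the patch site, adding tp->len (5)
 * gives the address of the next instruction, 0xffffffff81000105, and the
 * emulated call target is next + disp = 0xffffffff81000145, exactly what
 * the finished "call rel32" would have done.
 */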
2159  
2160  #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
2161  static struct text_poke_loc tp_vec[TP_VEC_MAX];
2162  static int tp_vec_nr;
2163  
2164  /**
2165   * text_poke_bp_batch() -- update instructions on live kernel on SMP
2166   * @tp:			vector of instructions to patch
2167   * @nr_entries:		number of entries in the vector
2168   *
2169   * Modify multi-byte instructions by using an int3 breakpoint on SMP.
2170   * We completely avoid stop_machine() here, and achieve the
2171   * synchronization using the int3 breakpoint.
2172   *
2173   * The way it is done:
2174   *	- For each entry in the vector:
2175   *		- add an int3 trap to the address that will be patched
2176   *	- sync cores
2177   *	- For each entry in the vector:
2178   *		- update all but the first byte of the patched range
2179   *	- sync cores
2180   *	- For each entry in the vector:
2181   *		- replace the first byte (int3) with the first byte of
2182   *		  the replacement opcode
2183   *	- sync cores
2184   */
2185  static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
2186  {
2187  	unsigned char int3 = INT3_INSN_OPCODE;
2188  	unsigned int i;
2189  	int do_sync;
2190  
2191  	lockdep_assert_held(&text_mutex);
2192  
2193  	bp_desc.vec = tp;
2194  	bp_desc.nr_entries = nr_entries;
2195  
2196  	/*
2197  	 * Corresponds to the implicit memory barrier in try_get_desc() to
2198  	 * ensure reading a non-zero refcount provides up to date bp_desc data.
2199  	 */
2200  	atomic_set_release(&bp_desc.refs, 1);
2201  
2202  	/*
2203  	 * Function tracing can enable thousands of places that need to be
2204  	 * updated. This can take quite some time, and with full kernel debugging
2205  	 * enabled, this could cause the softlockup watchdog to trigger.
2206  	 * This function gets called every 256 entries added to be patched.
2207  	 * Call cond_resched() here to make sure that other tasks can get scheduled
2208  	 * while processing all the functions being patched.
2209  	 */
2210  	cond_resched();
2211  
2212  	/*
2213  	 * Corresponding read barrier in int3 notifier for making sure the
2214  	 * nr_entries and handler are correctly ordered wrt. patching.
2215  	 */
2216  	smp_wmb();
2217  
2218  	/*
2219  	 * First step: add an int3 trap to the address that will be patched.
2220  	 */
2221  	for (i = 0; i < nr_entries; i++) {
2222  		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
2223  		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
2224  	}
2225  
2226  	text_poke_sync();
2227  
2228  	/*
2229  	 * Second step: update all but the first byte of the patched range.
2230  	 */
2231  	for (do_sync = 0, i = 0; i < nr_entries; i++) {
2232  		u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
2233  		u8 _new[POKE_MAX_OPCODE_SIZE+1];
2234  		const u8 *new = tp[i].text;
2235  		int len = tp[i].len;
2236  
2237  		if (len - INT3_INSN_SIZE > 0) {
2238  			memcpy(old + INT3_INSN_SIZE,
2239  			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2240  			       len - INT3_INSN_SIZE);
2241  
2242  			if (len == 6) {
2243  				_new[0] = 0x0f;
2244  				memcpy(_new + 1, new, 5);
2245  				new = _new;
2246  			}
2247  
2248  			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2249  				  new + INT3_INSN_SIZE,
2250  				  len - INT3_INSN_SIZE);
2251  
2252  			do_sync++;
2253  		}
2254  
2255  		/*
2256  		 * Emit a perf event to record the text poke, primarily to
2257  		 * support Intel PT decoding which must walk the executable code
2258  		 * to reconstruct the trace. The flow up to here is:
2259  		 *   - write INT3 byte
2260  		 *   - IPI-SYNC
2261  		 *   - write instruction tail
2262  		 * At this point the actual control flow will be through the
2263  		 * INT3 and handler and not hit the old or new instruction.
2264  		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
2265  		 * can still be decoded. Subsequently:
2266  		 *   - emit RECORD_TEXT_POKE with the new instruction
2267  		 *   - IPI-SYNC
2268  		 *   - write first byte
2269  		 *   - IPI-SYNC
2270  		 * So before the text poke event timestamp, the decoder will see
2271  		 * either the old instruction flow or FUP/TIP of INT3. After the
2272  		 * text poke event timestamp, the decoder will see either the
2273  		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
2274  		 * use the timestamp as the point at which to modify the
2275  		 * executable code.
2276  		 * The old instruction is recorded so that the event can be
2277  		 * processed forwards or backwards.
2278  		 */
2279  		perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
2280  	}
2281  
2282  	if (do_sync) {
2283  		/*
2284  		 * According to Intel, this core syncing is very likely
2285  		 * not necessary and we'd be safe even without it. But
2286  		 * better safe than sorry (plus there's not only Intel).
2287  		 */
2288  		text_poke_sync();
2289  	}
2290  
2291  	/*
2292  	 * Third step: replace the first byte (int3) with the first byte of
2293  	 * the replacement opcode.
2294  	 */
2295  	for (do_sync = 0, i = 0; i < nr_entries; i++) {
2296  		u8 byte = tp[i].text[0];
2297  
2298  		if (tp[i].len == 6)
2299  			byte = 0x0f;
2300  
2301  		if (byte == INT3_INSN_OPCODE)
2302  			continue;
2303  
2304  		text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
2305  		do_sync++;
2306  	}
2307  
2308  	if (do_sync)
2309  		text_poke_sync();
2310  
2311  	/*
2312  	 * Remove and wait for refs to be zero.
2313  	 */
2314  	if (!atomic_dec_and_test(&bp_desc.refs))
2315  		atomic_cond_read_acquire(&bp_desc.refs, !VAL);
2316  }
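
/*
 * Worked example of the three steps for a single 5 byte site (illustrative
 * bytes), turning "nop5" (0f 1f 44 00 00) into "call rel32"
 * (e8 40 00 00 00):
 *
 *	initial:	0f 1f 44 00 00
 *	step 1:		cc 1f 44 00 00	int3 leads, handler emulates the CALL
 *	sync cores
 *	step 2:		cc 40 00 00 00	tail bytes of the new instruction
 *	sync cores
 *	step 3:		e8 40 00 00 00	first byte completes the CALL
 *	sync cores
 *
 * A CPU therefore only ever observes the old instruction, an INT3
 * (emulated via poke_int3_handler()), or the new instruction, never a mix
 * of old and new bytes.
 */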
2317  
2318  static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
2319  			       const void *opcode, size_t len, const void *emulate)
2320  {
2321  	struct insn insn;
2322  	int ret, i = 0;
2323  
2324  	if (len == 6)
2325  		i = 1;
2326  	memcpy((void *)tp->text, opcode+i, len-i);
2327  	if (!emulate)
2328  		emulate = opcode;
2329  
2330  	ret = insn_decode_kernel(&insn, emulate);
2331  	BUG_ON(ret < 0);
2332  
2333  	tp->rel_addr = addr - (void *)_stext;
2334  	tp->len = len;
2335  	tp->opcode = insn.opcode.bytes[0];
2336  
2337  	if (is_jcc32(&insn)) {
2338  		/*
2339  		 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
2340  		 */
2341  		tp->opcode = insn.opcode.bytes[1] - 0x10;
2342  	}
2343  
2344  	switch (tp->opcode) {
2345  	case RET_INSN_OPCODE:
2346  	case JMP32_INSN_OPCODE:
2347  	case JMP8_INSN_OPCODE:
2348  		/*
2349  		 * Control flow instructions without implied execution of the
2350  		 * next instruction can be padded with INT3.
2351  		 */
2352  		for (i = insn.length; i < len; i++)
2353  			BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
2354  		break;
2355  
2356  	default:
2357  		BUG_ON(len != insn.length);
2358  	}
2359  
2360  	switch (tp->opcode) {
2361  	case INT3_INSN_OPCODE:
2362  	case RET_INSN_OPCODE:
2363  		break;
2364  
2365  	case CALL_INSN_OPCODE:
2366  	case JMP32_INSN_OPCODE:
2367  	case JMP8_INSN_OPCODE:
2368  	case 0x70 ... 0x7f: /* Jcc */
2369  		tp->disp = insn.immediate.value;
2370  		break;
2371  
2372  	default: /* assume NOP */
2373  		switch (len) {
2374  		case 2: /* NOP2 -- emulate as JMP8+0 */
2375  			BUG_ON(memcmp(emulate, x86_nops[len], len));
2376  			tp->opcode = JMP8_INSN_OPCODE;
2377  			tp->disp = 0;
2378  			break;
2379  
2380  		case 5: /* NOP5 -- emulate as JMP32+0 */
2381  			BUG_ON(memcmp(emulate, x86_nops[len], len));
2382  			tp->opcode = JMP32_INSN_OPCODE;
2383  			tp->disp = 0;
2384  			break;
2385  
2386  		default: /* unknown instruction */
2387  			BUG();
2388  		}
2389  		break;
2390  	}
2391  }
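
/*
 * Note, as a worked case of the NOP handling above: a 5 byte NOP queued
 * without an explicit @emulate is emulated as JMP32 with disp 0, i.e. a
 * jump to the next instruction, so a CPU that hits the transient INT3
 * simply falls through, just as the NOP would have.
 */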
2392  
2393  /*
2394   * We rely hard on the tp_vec being ordered; ensure this is so by flushing
2395   * early if needed.
2396   */
2397  static bool tp_order_fail(void *addr)
2398  {
2399  	struct text_poke_loc *tp;
2400  
2401  	if (!tp_vec_nr)
2402  		return false;
2403  
2404  	if (!addr) /* force */
2405  		return true;
2406  
2407  	tp = &tp_vec[tp_vec_nr - 1];
2408  	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
2409  		return true;
2410  
2411  	return false;
2412  }
2413  
2414  static void text_poke_flush(void *addr)
2415  {
2416  	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
2417  		text_poke_bp_batch(tp_vec, tp_vec_nr);
2418  		tp_vec_nr = 0;
2419  	}
2420  }
2421  
2422  void text_poke_finish(void)
2423  {
2424  	text_poke_flush(NULL);
2425  }
2426  
2427  void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
2428  {
2429  	struct text_poke_loc *tp;
2430  
2431  	text_poke_flush(addr);
2432  
2433  	tp = &tp_vec[tp_vec_nr++];
2434  	text_poke_loc_init(tp, addr, opcode, len, emulate);
2435  }
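
/*
 * Hedged example of the batching API ("sites", "insns" and "nr" are
 * hypothetical): queue patch sites in ascending address order (otherwise
 * the vector is flushed early) and let text_poke_finish() emit them in
 * batched int3 rounds. text_mutex must be held, as for text_poke_bp().
 */
#if 0	/* illustrative only, not built */
static void example_patch_many(void **sites, const u8 (*insns)[5], int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		text_poke_queue(sites[i], insns[i], 5, NULL);

	text_poke_finish();
}
#endif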
2436  
2437  /**
2438   * text_poke_bp() -- update instructions on live kernel on SMP
2439   * @addr:	address to patch
2440   * @opcode:	opcode of new instruction
2441   * @len:	length to copy
2442   * @emulate:	instruction to be emulated
2443   *
2444   * Update a single instruction using a vector on the stack, avoiding
2445   * dynamically allocated memory. This function should be used when it is
2446   * not possible to allocate memory.
2447   */
2448  void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
2449  {
2450  	struct text_poke_loc tp;
2451  
2452  	text_poke_loc_init(&tp, addr, opcode, len, emulate);
2453  	text_poke_bp_batch(&tp, 1);
2454  }
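
/*
 * Hedged example of a single-site update ("site" and "target" are
 * hypothetical): replace a 5 byte NOP with a direct call. While the bytes
 * are being rewritten, the int3 machinery above emulates the new CALL, so
 * concurrent execution never sees a half-written instruction.
 */
#if 0	/* illustrative only, not built */
static void example_patch_call(void *site, void *target)
{
	s32 disp = (long)target - ((long)site + CALL_INSN_SIZE);
	u8 insn[CALL_INSN_SIZE];

	insn[0] = CALL_INSN_OPCODE;
	memcpy(&insn[1], &disp, sizeof(disp));

	mutex_lock(&text_mutex);
	text_poke_bp(site, insn, sizeof(insn), NULL);
	mutex_unlock(&text_mutex);
}
#endif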
2455