// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 *  Modifications for ppc64:
 *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 *  Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/security_features.h>
#include <asm/firmware.h>
#include <asm/inst.h>

struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;
	long		end_off;
	long		alt_start_off;
	long		alt_end_off;
};

static struct ppc_inst *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (struct ppc_inst *)((unsigned long)fcur + offset);
}

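/*
 * Copy one instruction from the alternative section into the code being
 * patched. A relative branch that targets something outside the
 * alternative section is re-encoded so it still reaches the same address
 * from its new location; branches within the section are copied as-is.
 */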
static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest,
				 struct ppc_inst *alt_start, struct ppc_inst *alt_end)
{
	int err;
	struct ppc_inst instr;

	instr = ppc_inst_read(src);

	if (instr_is_relative_branch(*src)) {
		struct ppc_inst *target = (struct ppc_inst *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target > alt_end) {
			err = translate_branch(&instr, dest, src);
			if (err)
				return 1;
		}
	}

	raw_patch_instruction(dest, instr);

	return 0;
}

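/*
 * Apply a single fixup entry. If the feature bits selected by the mask
 * don't match the entry's value, the original section is overwritten with
 * the alternative instructions and any remaining space is filled with
 * nops. Returns non-zero if the alternative is larger than the section it
 * replaces or an instruction can't be patched.
 */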
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	struct ppc_inst *start, *end, *alt_start, *alt_end, *src, *dest, nop;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	if ((alt_end - alt_start) > (end - start))
		return 1;

	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	for (; src < alt_end; src = ppc_inst_next(src, src),
			      dest = ppc_inst_next(dest, dest)) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	nop = ppc_inst(PPC_INST_NOP);
	for (; dest < end; dest = ppc_inst_next(dest, &nop))
		raw_patch_instruction(dest, nop);

	return 0;
}

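/*
 * Walk a table of fixup entries and patch every section whose feature
 * condition isn't satisfied by the given value.
 */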
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p" \
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}

#ifdef CONFIG_PPC_BOOK3S_64
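/*
 * Patch the three-instruction slots recorded in the
 * __stf_entry_barrier_fixup table with the chosen store forwarding (STF)
 * barrier sequence for kernel entry: a call to the fallback barrier, an
 * eieio, or a hwsync-based sequence. Unused slots stay as nops.
 */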
static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_entry_barrier_fixup);
	end = PTRRELOC(&__stop___stf_entry_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK) {
		instrs[i++] = 0x7d4802a6; /* mflr r10		*/
		instrs[i++] = 0x60000000; /* branch patched below */
		instrs[i++] = 0x7d4803a6; /* mtlr r10		*/
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	} else if (types & STF_BARRIER_SYNC_ORI) {
		instrs[i++] = 0x7c0004ac; /* hwsync		*/
		instrs[i++] = 0xe94d0000; /* ld r10,0(r13)	*/
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));

		if (types & STF_BARRIER_FALLBACK)
			patch_branch((struct ppc_inst *)(dest + 1),
				     (unsigned long)&stf_barrier_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction((struct ppc_inst *)(dest + 1),
					  ppc_inst(instrs[1]));

		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
	}

	printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)                  ? "no" :
		(types == STF_BARRIER_FALLBACK)              ? "fallback" :
		(types == STF_BARRIER_EIEIO)                 ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
		                                           : "unknown");
}

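/*
 * Patch the six-instruction slots recorded in the
 * __stf_exit_barrier_fixup table with the STF barrier sequence used when
 * exiting the kernel. The fallback/hwsync variants need a scratch SPR to
 * save r13, chosen according to whether the kernel runs in HV mode.
 */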
static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[6], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_exit_barrier_fixup);
	end = PTRRELOC(&__stop___stf_exit_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */
	instrs[3] = 0x60000000; /* nop */
	instrs[4] = 0x60000000; /* nop */
	instrs[5] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
			instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
		} else {
			instrs[i++] = 0x7db243a6; /* mtsprg 2,r13	*/
			instrs[i++] = 0x7db142a6; /* mfsprg r13,1	*/
		}
		instrs[i++] = 0x7c0004ac; /* hwsync		*/
		instrs[i++] = 0xe9ad0000; /* ld r13,0(r13)	*/
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
		} else {
			instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
		}
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
		patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
		patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
		patch_instruction((struct ppc_inst *)(dest + 4), ppc_inst(instrs[4]));
		patch_instruction((struct ppc_inst *)(dest + 5), ppc_inst(instrs[5]));
	}
	printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)                  ? "no" :
		(types == STF_BARRIER_FALLBACK)              ? "fallback" :
		(types == STF_BARRIER_EIEIO)                 ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
		                                           : "unknown");
}

void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	do_stf_entry_barrier_fixups(types);
	do_stf_exit_barrier_fixups(types);
}

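/*
 * Patch the selected L1D flush sequence into every __uaccess_flush_fixup
 * site. The final slot is a blr, except for the fallback flush where it
 * is left as a nop so execution falls through to the fallback routine.
 */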
void do_uaccess_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[4], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___uaccess_flush_fixup);
	end = PTRRELOC(&__stop___uaccess_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */
	instrs[3] = 0x4e800020; /* blr */

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[3] = 0x60000000; /* nop */
		/* fallthrough to fallback flush */
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));

		patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
		patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
	}

	printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

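/*
 * Patch the selected L1D flush sequence into the kernel entry flush
 * sites, covering both the regular (__entry_flush_fixup) and scv
 * (__scv_entry_flush_fixup) tables. The fallback variant branches to the
 * corresponding out-of-line fallback flush routine.
 */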
void do_entry_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[i++] = 0x7d4802a6; /* mflr r10		*/
		instrs[i++] = 0x60000000; /* branch patched below */
		instrs[i++] = 0x7d4803a6; /* mtlr r10		*/
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	start = PTRRELOC(&__start___entry_flush_fixup);
	end = PTRRELOC(&__stop___entry_flush_fixup);
	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));

		if (types == L1D_FLUSH_FALLBACK)
			patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));

		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
	}

	start = PTRRELOC(&__start___scv_entry_flush_fixup);
	end = PTRRELOC(&__stop___scv_entry_flush_fixup);
	for (; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));

		if (types == L1D_FLUSH_FALLBACK)
			patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));

		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
	}

	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

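/*
 * Patch the selected L1D flush sequence into every __rfi_flush_fixup
 * site. For the fallback flush the first slot becomes a branch (b .+16)
 * over the patched slots to the fallback flush sequence.
 */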
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___rfi_flush_fixup);
	end = PTRRELOC(&__stop___rfi_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	if (types & L1D_FLUSH_FALLBACK)
		/* b .+16 to fallback flush */
		instrs[0] = 0x48000010;

	i = 0;
	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
		patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
	}

	printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

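/*
 * Patch every barrier_nospec site in the given range with either a nop
 * (barrier disabled) or the ori speculation barrier (barrier enabled).
 */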
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr, *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr = 0x60000000; /* nop */

	if (enable) {
		pr_info("barrier-nospec: using ORI speculation barrier\n");
		instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction((struct ppc_inst *)dest, ppc_inst(instr));
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups(bool enable)
{
	void *start, *end;

	start = PTRRELOC(&__start___barrier_nospec_fixup);
	end = PTRRELOC(&__stop___barrier_nospec_fixup);

	do_barrier_nospec_fixups_range(enable, start, end);
}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr[2], *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr[0] = PPC_INST_NOP;
	instr[1] = PPC_INST_NOP;

	if (enable) {
		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
		instr[0] = PPC_INST_ISYNC;
		instr[1] = PPC_INST_SYNC;
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction((struct ppc_inst *)dest, ppc_inst(instr[0]));
		patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instr[1]));
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

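/*
 * Each entry in the __btb_flush_fixup table is a pair of offsets bounding
 * a branch target buffer flush sequence; patching a section replaces
 * every instruction in that range with a nop.
 */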
static void patch_btb_flush_section(long *curr)
{
	unsigned int *start, *end;

	start = (void *)curr + *curr;
	end = (void *)curr + *(curr + 1);
	for (; start < end; start++) {
		pr_devel("patching dest %lx\n", (unsigned long)start);
		patch_instruction((struct ppc_inst *)start, ppc_inst(PPC_INST_NOP));
	}
}

void do_btb_flush_fixups(void)
{
	long *start, *end;

	start = PTRRELOC(&__start__btb_flush_fixup);
	end = PTRRELOC(&__stop__btb_flush_fixup);

	for (; start < end; start += 2)
		patch_btb_flush_section(start);
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

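/*
 * On CPUs that support lightweight sync (CPU_FTR_LWSYNC), patch an lwsync
 * over each recorded fixup site; otherwise the heavier sync already there
 * is left in place.
 */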
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	long *start, *end;
	struct ppc_inst *dest;

	if (!(value & CPU_FTR_LWSYNC))
		return;

	start = fixup_start;
	end = fixup_end;

	for (; start < end; start++) {
		dest = (void *)start + *start;
		raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC));
	}
}

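/*
 * On a relocatable 64-bit kernel that isn't running at physical address
 * zero, copy the (already patched) text from _stext to __end_interrupts
 * from the running kernel back down to its linked address at KERNELBASE,
 * so the low-memory interrupt vectors match the patched kernel.
 */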
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	struct ppc_inst inst, *src, *dest, *end;

	if (PHYSICAL_START == 0)
		return;

	src = (struct ppc_inst *)(KERNELBASE + PHYSICAL_START);
	dest = (struct ppc_inst *)KERNELBASE;
	end = (void *)src + (__end_interrupts - _stext);

	while (src < end) {
		inst = ppc_inst_read(src);
		raw_patch_instruction(dest, inst);
		src = ppc_inst_next(src, src);
		dest = ppc_inst_next(dest, dest);
	}
#endif
}

static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif

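/*
 * Runs very early in boot, possibly before relocations have been applied,
 * hence the PTRRELOC() wrappers around every global access. The feature
 * masks are saved so check_features() can warn if they change after the
 * code has been patched.
 */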
void __init apply_feature_fixups(void)
{
	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware-specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();
}

void __init setup_feature_keys(void)
{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 */
	jump_label_init();
	cpu_feature_keys_init();
	mmu_feature_keys_init();
}

static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);

#ifdef CONFIG_FTR_FIXUP_SELFTEST

#define check(x)	\
	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;

static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	return (unsigned long)p - (unsigned long)entry;
}

static void test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1[];
	extern unsigned int end_ftr_fixup_test1[];
	extern unsigned int ftr_fixup_test1_orig[];
	extern unsigned int ftr_fixup_test1_expected[];
	int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}

static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2[];
	extern unsigned int end_ftr_fixup_test2[];
	extern unsigned int ftr_fixup_test2_orig[];
	extern unsigned int ftr_fixup_test2_alt[];
	extern unsigned int ftr_fixup_test2_expected[];
	int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2);

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}

static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3[];
	extern unsigned int end_ftr_fixup_test3[];
	extern unsigned int ftr_fixup_test3_orig[];
	extern unsigned int ftr_fixup_test3_alt[];
	int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3);

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}

static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4[];
	extern unsigned int end_ftr_fixup_test4[];
	extern unsigned int ftr_fixup_test4_orig[];
	extern unsigned int ftr_fixup_test4_alt[];
	extern unsigned int ftr_fixup_test4_expected[];
	int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4);
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}

static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5[];
	extern unsigned int end_ftr_fixup_test5[];
	extern unsigned int ftr_fixup_test5_expected[];
	int size = 4 * (end_ftr_fixup_test5 - ftr_fixup_test5);

	check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}

static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6[];
	extern unsigned int end_ftr_fixup_test6[];
	extern unsigned int ftr_fixup_test6_expected[];
	int size = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6);

	check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}

static void test_alternative_case_with_branch_to_end(void)
{
	extern unsigned int ftr_fixup_test7[];
	extern unsigned int end_ftr_fixup_test7[];
	extern unsigned int ftr_fixup_test7_expected[];
	int size = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7);

	check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0);
}

static void test_cpu_macros(void)
{
	extern u8 ftr_fixup_test_FTR_macros[];
	extern u8 ftr_fixup_test_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FTR_macros_expected -
			     ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FTR_macros,
		     ftr_fixup_test_FTR_macros_expected, size) == 0);
}

static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros[];
	extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FW_FTR_macros_expected -
			     ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FW_FTR_macros,
		     ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

static void test_lwsync_macros(void)
{
	extern u8 lwsync_fixup_test[];
	extern u8 end_lwsync_fixup_test[];
	extern u8 lwsync_fixup_test_expected_LWSYNC[];
	extern u8 lwsync_fixup_test_expected_SYNC[];
	unsigned long size = end_lwsync_fixup_test -
			     lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}

#ifdef CONFIG_PPC64
static void __init test_prefix_patching(void)
{
	extern unsigned int ftr_fixup_prefix1[];
	extern unsigned int end_ftr_fixup_prefix1[];
	extern unsigned int ftr_fixup_prefix1_orig[];
	extern unsigned int ftr_fixup_prefix1_expected[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix1 - ftr_fixup_prefix1);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix1 + 3);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) == 0);

	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_expected, size) == 0);
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) != 0);
}

static void __init test_prefix_alt_patching(void)
{
	extern unsigned int ftr_fixup_prefix2[];
	extern unsigned int end_ftr_fixup_prefix2[];
	extern unsigned int ftr_fixup_prefix2_orig[];
	extern unsigned int ftr_fixup_prefix2_expected[];
	extern unsigned int ftr_fixup_prefix2_alt[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix2 - ftr_fixup_prefix2);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix2 + 3);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix2_alt + 2);
	/* Sanity check */
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) == 0);

	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_expected, size) == 0);
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) != 0);
}

static void __init test_prefix_word_alt_patching(void)
{
	extern unsigned int ftr_fixup_prefix3[];
	extern unsigned int end_ftr_fixup_prefix3[];
	extern unsigned int ftr_fixup_prefix3_orig[];
	extern unsigned int ftr_fixup_prefix3_expected[];
	extern unsigned int ftr_fixup_prefix3_alt[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix3 - ftr_fixup_prefix3);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix3 + 4);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix3_alt + 3);
	/* Sanity check */
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) == 0);

	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_expected, size) == 0);
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) != 0);
}
#else
static inline void test_prefix_patching(void) {}
static inline void test_prefix_alt_patching(void) {}
static inline void test_prefix_word_alt_patching(void) {}
#endif /* CONFIG_PPC64 */

static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_alternative_case_with_branch_to_end();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();
	test_prefix_patching();
	test_prefix_alt_patching();
	test_prefix_word_alt_patching();

	return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */