12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
251c52e86SMichael Ellerman /*
351c52e86SMichael Ellerman * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
451c52e86SMichael Ellerman *
551c52e86SMichael Ellerman * Modifications for ppc64:
651c52e86SMichael Ellerman * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
751c52e86SMichael Ellerman *
851c52e86SMichael Ellerman * Copyright 2008 Michael Ellerman, IBM Corporation.
951c52e86SMichael Ellerman */
1051c52e86SMichael Ellerman
113880ecb0SStephen Rothwell #include <linux/types.h>
12309b315bSAneesh Kumar K.V #include <linux/jump_label.h>
1351c52e86SMichael Ellerman #include <linux/kernel.h>
14362e7701SMichael Ellerman #include <linux/string.h>
15362e7701SMichael Ellerman #include <linux/init.h>
16589ee628SIngo Molnar #include <linux/sched/mm.h>
178ec7791bSMichael Ellerman #include <linux/stop_machine.h>
1851c52e86SMichael Ellerman #include <asm/cputable.h>
1951c52e86SMichael Ellerman #include <asm/code-patching.h>
2013799748SNicholas Piggin #include <asm/interrupt.h>
21d715e433SAnton Blanchard #include <asm/page.h>
22d715e433SAnton Blanchard #include <asm/sections.h>
239402c684SBenjamin Herrenschmidt #include <asm/setup.h>
24a048a07dSNicholas Piggin #include <asm/security_features.h>
259402c684SBenjamin Herrenschmidt #include <asm/firmware.h>
2675346251SJordan Niethe #include <asm/inst.h>
2751c52e86SMichael Ellerman
/*
 * One entry in a feature fixup section.  All offsets are stored relative
 * to the entry itself (and may be negative), so the section is position
 * independent -- see the comment in calc_addr() regarding the VDSO.
 */
struct fixup_entry {
	unsigned long mask;	/* feature bits this entry tests */
	unsigned long value;	/* feature values required to keep the code */
	long start_off;		/* relative offset: start of patchable code */
	long end_off;		/* relative offset: end of patchable code */
	long alt_start_off;	/* relative offset: start of alternative code */
	long alt_end_off;	/* relative offset: end of alternative code */
};
3651c52e86SMichael Ellerman
calc_addr(struct fixup_entry * fcur,long offset)3769d4d6e5SChristophe Leroy static u32 *calc_addr(struct fixup_entry *fcur, long offset)
3851c52e86SMichael Ellerman {
399b1a735dSMichael Ellerman /*
409b1a735dSMichael Ellerman * We store the offset to the code as a negative offset from
419b1a735dSMichael Ellerman * the start of the alt_entry, to support the VDSO. This
429b1a735dSMichael Ellerman * routine converts that back into an actual address.
439b1a735dSMichael Ellerman */
4469d4d6e5SChristophe Leroy return (u32 *)((unsigned long)fcur + offset);
459b1a735dSMichael Ellerman }
469b1a735dSMichael Ellerman
/*
 * Copy one instruction of an alternative section into the feature
 * section at @dest.  A relative branch whose target lies outside the
 * alternative section is re-translated so it still reaches the same
 * destination from its new location.
 *
 * Returns 0 on success, 1 if the branch could not be translated.
 */
static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_end)
{
	int err;
	ppc_inst_t instr;

	instr = ppc_inst_read(src);

	/* Reuse the instruction we already read rather than re-reading src */
	if (instr_is_relative_branch(instr)) {
		u32 *target = (u32 *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target > alt_end) {
			err = translate_branch(&instr, dest, src);
			if (err)
				return 1;
		}
	}

	raw_patch_instruction(dest, instr);

	return 0;
}
699b1a735dSMichael Ellerman
/*
 * Apply one fixup entry: if the CPU feature @value (restricted to @mask)
 * does not match what the entry requires, copy the alternative code over
 * the original section and fill any remaining space with nops.
 *
 * Returns 0 on success, 1 if the alternative code is larger than the
 * region it must replace or a branch could not be translated.
 */
static int patch_feature_section_mask(unsigned long value, unsigned long mask,
				      struct fixup_entry *fcur)
{
	u32 *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	/* The alternative code must fit into the region it replaces */
	if ((alt_end - alt_start) > (end - start))
		return 1;

	/* Features match what the entry wants: keep the original code */
	if ((value & fcur->mask & mask) == (fcur->value & mask))
		return 0;

	src = alt_start;
	dest = start;

	/* ppc_inst_next() advances by each instruction's actual size */
	for (; src < alt_end; src = ppc_inst_next(src, src),
			      dest = ppc_inst_next(dest, dest)) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	/* Nop out whatever is left of the original section */
	for (; dest < end; dest++)
		raw_patch_instruction(dest, ppc_inst(PPC_RAW_NOP()));

	return 0;
}
10051c52e86SMichael Ellerman
do_feature_fixups_mask(unsigned long value,unsigned long mask,void * fixup_start,void * fixup_end)101*6b289911SChristophe Leroy static void do_feature_fixups_mask(unsigned long value, unsigned long mask,
102*6b289911SChristophe Leroy void *fixup_start, void *fixup_end)
10351c52e86SMichael Ellerman {
10451c52e86SMichael Ellerman struct fixup_entry *fcur, *fend;
10551c52e86SMichael Ellerman
10651c52e86SMichael Ellerman fcur = fixup_start;
10751c52e86SMichael Ellerman fend = fixup_end;
10851c52e86SMichael Ellerman
1099b1a735dSMichael Ellerman for (; fcur < fend; fcur++) {
110*6b289911SChristophe Leroy if (patch_feature_section_mask(value, mask, fcur)) {
1111856c020SMichael Ellerman WARN_ON(1);
1129b1a735dSMichael Ellerman printk("Unable to patch feature section at %p - %p" \
1139b1a735dSMichael Ellerman " with %p - %p\n",
1149b1a735dSMichael Ellerman calc_addr(fcur, fcur->start_off),
1159b1a735dSMichael Ellerman calc_addr(fcur, fcur->end_off),
1169b1a735dSMichael Ellerman calc_addr(fcur, fcur->alt_start_off),
1179b1a735dSMichael Ellerman calc_addr(fcur, fcur->alt_end_off));
1189b1a735dSMichael Ellerman }
1199b1a735dSMichael Ellerman }
12051c52e86SMichael Ellerman }
121362e7701SMichael Ellerman
/*
 * Convenience wrapper: apply feature fixups considering every feature
 * bit, i.e. with an all-ones mask.
 */
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	do_feature_fixups_mask(value, ~0UL, fixup_start, fixup_end);
}
126*6b289911SChristophe Leroy
1273d1dbbcaSChristophe Leroy #ifdef CONFIG_PPC_BARRIER_NOSPEC
is_fixup_addr_valid(void * dest,size_t size)128b988e779SChristophe Leroy static bool is_fixup_addr_valid(void *dest, size_t size)
129b988e779SChristophe Leroy {
130b988e779SChristophe Leroy return system_state < SYSTEM_FREEING_INITMEM ||
131b988e779SChristophe Leroy !init_section_contains(dest, size);
132b988e779SChristophe Leroy }
133b988e779SChristophe Leroy
/*
 * Patch each fixup site in [start, end) with the @num instructions in
 * @instrs.  Each element of the fixup array is a relative offset from
 * itself to the destination.  Returns the number of entries walked
 * (including any skipped because they were in freed init memory).
 */
static int do_patch_fixups(long *start, long *end, unsigned int *instrs, int num)
{
	int i;

	for (i = 0; start < end; start++, i++) {
		int j;
		unsigned int *dest = (void *)start + *start;

		/* Don't patch sites that were in now-freed init sections */
		if (!is_fixup_addr_valid(dest, sizeof(*instrs) * num))
			continue;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		for (j = 0; j < num; j++)
			patch_instruction(dest + j, ppc_inst(instrs[j]));
	}
	return i;
}
1523d1dbbcaSChristophe Leroy #endif
1533d1dbbcaSChristophe Leroy
154aa8a5e00SMichael Ellerman #ifdef CONFIG_PPC_BOOK3S_64
/*
 * Patch each three-instruction entry-flush/barrier site in [start, end).
 * When @do_fallback is set, slot 1 becomes a link branch to @fallback;
 * otherwise the three instructions from @instrs are used as-is.  The
 * patching order differs between the two cases so the sequence stays
 * safe to execute in a half-patched state -- see the detailed comment
 * in __do_entry_flush_fixups().  Returns the number of entries walked.
 */
static int do_patch_entry_fixups(long *start, long *end, unsigned int *instrs,
				 bool do_fallback, void *fallback)
{
	int i;

	for (i = 0; start < end; start++, i++) {
		unsigned int *dest = (void *)start + *start;

		/* Don't patch sites that were in now-freed init sections */
		if (!is_fixup_addr_valid(dest, sizeof(*instrs) * 3))
			continue;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		// See comment in do_entry_flush_fixups() RE order of patching
		if (do_fallback) {
			patch_instruction(dest, ppc_inst(instrs[0]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_branch(dest + 1, (unsigned long)fallback, BRANCH_SET_LINK);
		} else {
			patch_instruction(dest + 1, ppc_inst(instrs[1]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_instruction(dest, ppc_inst(instrs[0]));
		}
	}
	return i;
}
1816076dc34SChristophe Leroy
/*
 * Patch the store-to-load-forwarding (STF) entry barrier sites with the
 * sequence for the requested barrier type.  Called via stop_machine()
 * from __do_stf_barrier_fixups().
 */
static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[3];
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_entry_barrier_fixup);
	end = PTRRELOC(&__stop___stf_entry_barrier_fixup);

	/* Default all three slots to nops; fill below per barrier type */
	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();

	i = 0;
	if (types & STF_BARRIER_FALLBACK) {
		/* Save/restore LR around the link branch to the fallback */
		instrs[i++] = PPC_RAW_MFLR(_R10);
		instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
		instrs[i++] = PPC_RAW_MTLR(_R10);
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
	} else if (types & STF_BARRIER_SYNC_ORI) {
		instrs[i++] = PPC_RAW_SYNC();
		instrs[i++] = PPC_RAW_LD(_R10, _R13, 0);
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
	}

	i = do_patch_entry_fixups(start, end, instrs, types & STF_BARRIER_FALLBACK,
				  &stf_barrier_fallback);

	printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE) ? "no" :
		(types == STF_BARRIER_FALLBACK) ? "fallback" :
		(types == STF_BARRIER_EIEIO) ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
		: "unknown");
}
218a048a07dSNicholas Piggin
/*
 * Patch the STF exit barrier sites (up to six instructions each) with
 * the sequence for the requested barrier type.  The fallback/sync-ori
 * variants stash r13 in an SPR (HSPRG1 in HV mode, SPRG2 otherwise) so
 * it can be used for the barrier load, then restore it.  Called via
 * stop_machine() from __do_stf_barrier_fixups().
 */
static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[6];
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_exit_barrier_fixup);
	end = PTRRELOC(&__stop___stf_exit_barrier_fixup);

	/* Default all six slots to nops; fill below per barrier type */
	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();
	instrs[3] = PPC_RAW_NOP();
	instrs[4] = PPC_RAW_NOP();
	instrs[5] = PPC_RAW_NOP();

	i = 0;
	if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = PPC_RAW_MTSPR(SPRN_HSPRG1, _R13);
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG0);
		} else {
			instrs[i++] = PPC_RAW_MTSPR(SPRN_SPRG2, _R13);
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG1);
		}
		instrs[i++] = PPC_RAW_SYNC();
		instrs[i++] = PPC_RAW_LD(_R13, _R13, 0);
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		if (cpu_has_feature(CPU_FTR_HVMODE))
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG1);
		else
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG2);
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
	}

	i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs));

	printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE) ? "no" :
		(types == STF_BARRIER_FALLBACK) ? "fallback" :
		(types == STF_BARRIER_EIEIO) ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
		: "unknown");
}
264a048a07dSNicholas Piggin
/*
 * Whether the STF / RFI exit paths are currently safe to run re-entrantly.
 * The interrupt_exit_not_reentrant static branch is only disabled when
 * both are true (see do_stf_barrier_fixups() / do_rfi_flush_fixups()).
 */
static bool stf_exit_reentrant = false;
static bool rfi_exit_reentrant = false;
/* Serialises the static key updates made by the two fixup paths */
static DEFINE_MUTEX(exit_flush_lock);
26813799748SNicholas Piggin
__do_stf_barrier_fixups(void * data)2698ec7791bSMichael Ellerman static int __do_stf_barrier_fixups(void *data)
2708ec7791bSMichael Ellerman {
2718ec7791bSMichael Ellerman enum stf_barrier_type *types = data;
2728ec7791bSMichael Ellerman
2738ec7791bSMichael Ellerman do_stf_entry_barrier_fixups(*types);
2748ec7791bSMichael Ellerman do_stf_exit_barrier_fixups(*types);
2758ec7791bSMichael Ellerman
2768ec7791bSMichael Ellerman return 0;
2778ec7791bSMichael Ellerman }
278a048a07dSNicholas Piggin
/*
 * Switch the kernel to the requested STF barrier type, patching all
 * entry/exit sites safely and updating the interrupt-exit static key.
 */
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	/*
	 * The call to the fallback entry flush, and the fallback/sync-ori exit
	 * flush can not be safely patched in/out while other CPUs are
	 * executing them. So call __do_stf_barrier_fixups() on one CPU while
	 * all other CPUs spin in the stop machine core with interrupts hard
	 * disabled.
	 *
	 * The branch to mark interrupt exits non-reentrant is enabled first,
	 * then stop_machine runs which will ensure all CPUs are out of the
	 * low level interrupt exit code before patching. After the patching,
	 * if allowed, then flip the branch to allow fast exits.
	 */

	// Prevent static key update races with do_rfi_flush_fixups()
	mutex_lock(&exit_flush_lock);
	static_branch_enable(&interrupt_exit_not_reentrant);

	stop_machine(__do_stf_barrier_fixups, &types, NULL);

	/* Fallback and sync-ori exits must never be entered re-entrantly */
	if ((types & STF_BARRIER_FALLBACK) || (types & STF_BARRIER_SYNC_ORI))
		stf_exit_reentrant = false;
	else
		stf_exit_reentrant = true;

	/* Fast (reentrant) exit is only safe if both paths allow it */
	if (stf_exit_reentrant && rfi_exit_reentrant)
		static_branch_disable(&interrupt_exit_not_reentrant);

	mutex_unlock(&exit_flush_lock);
}
310a048a07dSNicholas Piggin
/*
 * Patch the uaccess-flush sites with the requested L1D flush type.  The
 * default sequence is three nops followed by a blr (i.e. return without
 * flushing); for the fallback type the blr becomes a nop so execution
 * falls through into the fallback flush code.
 */
void do_uaccess_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[4];
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___uaccess_flush_fixup);
	end = PTRRELOC(&__stop___uaccess_flush_fixup);

	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();
	instrs[3] = PPC_RAW_BLR();

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[3] = PPC_RAW_NOP();
		/* fallthrough to fallback flush */
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);

	i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs));

	printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE) ? "no" :
		(types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
		(types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
						: "unknown");
}
3509a32a7e7SNicholas Piggin
/*
 * stop_machine() callback: patch all entry-flush sites (regular and scv)
 * with the requested L1D flush type.  See the in-body comment for why
 * the patching order matters.
 */
static int __do_entry_flush_fixups(void *data)
{
	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
	unsigned int instrs[3];
	long *start, *end;
	int i;

	/* Default all three slots to nops; fill below per flush type */
	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		/* Save/restore LR around the link branch to the fallback */
		instrs[i++] = PPC_RAW_MFLR(_R10);
		instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
		instrs[i++] = PPC_RAW_MTLR(_R10);
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);

	/*
	 * If we're patching in or out the fallback flush we need to be careful about the
	 * order in which we patch instructions. That's because it's possible we could
	 * take a page fault after patching one instruction, so the sequence of
	 * instructions must be safe even in a half patched state.
	 *
	 * To make that work, when patching in the fallback flush we patch in this order:
	 * - the mflr		(dest)
	 * - the mtlr		(dest + 2)
	 * - the branch		(dest + 1)
	 *
	 * That ensures the sequence is safe to execute at any point. In contrast if we
	 * patch the mtlr last, it's possible we could return from the branch and not
	 * restore LR, leading to a crash later.
	 *
	 * When patching out the fallback flush (either with nops or another flush type),
	 * we patch in this order:
	 * - the branch		(dest + 1)
	 * - the mtlr		(dest + 2)
	 * - the mflr		(dest)
	 *
	 * Note we are protected by stop_machine() from other CPUs executing the code in a
	 * semi-patched state.
	 */

	start = PTRRELOC(&__start___entry_flush_fixup);
	end = PTRRELOC(&__stop___entry_flush_fixup);
	i = do_patch_entry_fixups(start, end, instrs, types == L1D_FLUSH_FALLBACK,
				  &entry_flush_fallback);

	start = PTRRELOC(&__start___scv_entry_flush_fixup);
	end = PTRRELOC(&__stop___scv_entry_flush_fixup);
	i += do_patch_entry_fixups(start, end, instrs, types == L1D_FLUSH_FALLBACK,
				   &scv_entry_flush_fallback);

	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE) ? "no" :
		(types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
		(types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
						: "unknown");

	return 0;
}
423aec86b05SMichael Ellerman
/*
 * Switch the kernel to the requested L1D entry-flush type, with all
 * other CPUs parked so the multi-instruction sites are never executed
 * in a half-patched state.
 */
void do_entry_flush_fixups(enum l1d_flush_type types)
{
	/*
	 * The call to the fallback flush can not be safely patched in/out while
	 * other CPUs are executing it. So call __do_entry_flush_fixups() on one
	 * CPU while all other CPUs spin in the stop machine core with interrupts
	 * hard disabled.
	 */
	stop_machine(__do_entry_flush_fixups, &types, NULL);
}
434f7964378SNicholas Piggin
/*
 * stop_machine() callback: patch the RFI (return-from-interrupt) flush
 * sites with the requested L1D flush type.
 */
static int __do_rfi_flush_fixups(void *data)
{
	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
	unsigned int instrs[3];
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___rfi_flush_fixup);
	end = PTRRELOC(&__stop___rfi_flush_fixup);

	/* Default all three slots to nops; fill below per flush type */
	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();

	if (types & L1D_FLUSH_FALLBACK)
		/* b .+16 to fallback flush */
		instrs[0] = PPC_RAW_BRANCH(16);

	/*
	 * NOTE(review): if FALLBACK were ever combined with ORI/MTTRIG the
	 * fill below would overwrite the branch in slot 0 -- presumably the
	 * callers never pass that combination; verify if this changes.
	 */
	i = 0;
	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);

	i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs));

	printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE) ? "no" :
		(types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
		(types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
						: "unknown");

	return 0;
}
47513799748SNicholas Piggin
/*
 * Switch the kernel to the requested RFI L1D flush type and update the
 * interrupt-exit static key accordingly.
 */
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
	/*
	 * stop_machine gets all CPUs out of the interrupt exit handler same
	 * as do_stf_barrier_fixups. do_rfi_flush_fixups patching can run
	 * without stop_machine, so this could be achieved with a broadcast
	 * IPI instead, but this matches the stf sequence.
	 */

	// Prevent static key update races with do_stf_barrier_fixups()
	mutex_lock(&exit_flush_lock);
	static_branch_enable(&interrupt_exit_not_reentrant);

	stop_machine(__do_rfi_flush_fixups, &types, NULL);

	/* The fallback flush must never be entered re-entrantly */
	if (types & L1D_FLUSH_FALLBACK)
		rfi_exit_reentrant = false;
	else
		rfi_exit_reentrant = true;

	/* Fast (reentrant) exit is only safe if both paths allow it */
	if (stf_exit_reentrant && rfi_exit_reentrant)
		static_branch_disable(&interrupt_exit_not_reentrant);

	mutex_unlock(&exit_flush_lock);
}
5012eea7f06SMichal Suchanek
/*
 * Book3S-64 variant: patch each barrier-nospec site in the given fixup
 * range with either a nop (disabled) or the ori speculation barrier.
 */
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr = PPC_RAW_NOP();

	if (enable) {
		pr_info("barrier-nospec: using ORI speculation barrier\n");
		instr = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
	}

	i = do_patch_fixups(start, end, &instr, 1);

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
5222eea7f06SMichal Suchanek
523179ab1cbSMichael Ellerman #endif /* CONFIG_PPC_BOOK3S_64 */
524179ab1cbSMichael Ellerman
525179ab1cbSMichael Ellerman #ifdef CONFIG_PPC_BARRIER_NOSPEC
/*
 * Enable or disable the speculation barrier at every site recorded in
 * the kernel's own barrier_nospec fixup section.
 */
void do_barrier_nospec_fixups(bool enable)
{
	void *start = PTRRELOC(&__start___barrier_nospec_fixup);
	void *end = PTRRELOC(&__stop___barrier_nospec_fixup);

	do_barrier_nospec_fixups_range(enable, start, end);
}
535179ab1cbSMichael Ellerman #endif /* CONFIG_PPC_BARRIER_NOSPEC */
536aa8a5e00SMichael Ellerman
5373e731858SChristophe Leroy #ifdef CONFIG_PPC_E500
/*
 * E500 variant: patch each barrier-nospec site (two instruction slots)
 * with either nops (disabled) or an isync; sync speculation barrier.
 */
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr[2];
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr[0] = PPC_RAW_NOP();
	instr[1] = PPC_RAW_NOP();

	if (enable) {
		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
		instr[0] = PPC_RAW_ISYNC();
		instr[1] = PPC_RAW_SYNC();
	}

	i = do_patch_fixups(start, end, instr, ARRAY_SIZE(instr));

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
56076a5eaa3SDiana Craciun
patch_btb_flush_section(long * curr)561ce0c6be9SNick Child static void __init patch_btb_flush_section(long *curr)
56276a5eaa3SDiana Craciun {
56376a5eaa3SDiana Craciun unsigned int *start, *end;
56476a5eaa3SDiana Craciun
56576a5eaa3SDiana Craciun start = (void *)curr + *curr;
56676a5eaa3SDiana Craciun end = (void *)curr + *(curr + 1);
56776a5eaa3SDiana Craciun for (; start < end; start++) {
56876a5eaa3SDiana Craciun pr_devel("patching dest %lx\n", (unsigned long)start);
56969d4d6e5SChristophe Leroy patch_instruction(start, ppc_inst(PPC_RAW_NOP()));
57076a5eaa3SDiana Craciun }
57176a5eaa3SDiana Craciun }
57276a5eaa3SDiana Craciun
do_btb_flush_fixups(void)573ce0c6be9SNick Child void __init do_btb_flush_fixups(void)
57476a5eaa3SDiana Craciun {
57576a5eaa3SDiana Craciun long *start, *end;
57676a5eaa3SDiana Craciun
57776a5eaa3SDiana Craciun start = PTRRELOC(&__start__btb_flush_fixup);
57876a5eaa3SDiana Craciun end = PTRRELOC(&__stop__btb_flush_fixup);
57976a5eaa3SDiana Craciun
58076a5eaa3SDiana Craciun for (; start < end; start += 2)
58176a5eaa3SDiana Craciun patch_btb_flush_section(start);
58276a5eaa3SDiana Craciun }
5833e731858SChristophe Leroy #endif /* CONFIG_PPC_E500 */
584ebcd1bfcSDiana Craciun
do_lwsync_fixups(unsigned long value,void * fixup_start,void * fixup_end)5852d1b2027SKumar Gala void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
5862d1b2027SKumar Gala {
5873d98ffbfSBenjamin Herrenschmidt long *start, *end;
58869d4d6e5SChristophe Leroy u32 *dest;
5892d1b2027SKumar Gala
5902d1b2027SKumar Gala if (!(value & CPU_FTR_LWSYNC))
5912d1b2027SKumar Gala return ;
5922d1b2027SKumar Gala
5932d1b2027SKumar Gala start = fixup_start;
5942d1b2027SKumar Gala end = fixup_end;
5952d1b2027SKumar Gala
5962d1b2027SKumar Gala for (; start < end; start++) {
5972d1b2027SKumar Gala dest = (void *)start + *start;
59875346251SJordan Niethe raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC));
5992d1b2027SKumar Gala }
6002d1b2027SKumar Gala }
6012d1b2027SKumar Gala
/*
 * For a relocatable 64-bit kernel running above its linked address,
 * copy the now feature-fixed text from the running copy (the range
 * _stext .. __end_interrupts) back down to KERNELBASE, so the
 * low-memory exception vectors match the patched kernel image.
 */
static void __init do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	ppc_inst_t inst;
	u32 *src, *dest, *end;

	/* Kernel is running at its linked address; nothing to copy. */
	if (PHYSICAL_START == 0)
		return;

	src = (u32 *)(KERNELBASE + PHYSICAL_START);
	dest = (u32 *)KERNELBASE;
	end = (void *)src + (__end_interrupts - _stext);

	/*
	 * Copy one instruction at a time: ppc_inst_next() advances by the
	 * instruction's real size, so prefixed (8-byte) instructions are
	 * presumably read and written whole rather than split into halves
	 * — see ppc_inst_next()/ppc_inst_read() for the exact semantics.
	 */
	while (src < end) {
		inst = ppc_inst_read(src);
		raw_patch_instruction(dest, inst);
		src = ppc_inst_next(src, src);
		dest = ppc_inst_next(dest, dest);
	}
#endif
}
623d715e433SAnton Blanchard
/*
 * Feature masks as they were when the kernel text was patched against
 * them; compared with the live values in check_features() at
 * late_initcall time to catch late, unsafe feature changes.
 */
static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif
629a28e46f1SMichael Ellerman
/*
 * Apply all boot-time feature fixups (CPU, MMU, lwsync and, on ppc64,
 * firmware) to the kernel text, recording the feature masks used so
 * check_features() can warn later if they change after patching.
 *
 * NOTE(review): this appears to run before the kernel is relocated —
 * hence the PTRRELOC() on every pointer dereferenced here; keep that
 * discipline for any new accesses.
 */
void __init apply_feature_fixups(void)
{
	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

	/* Remember what we patched against, for check_features(). */
	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	/* Finally, sync the patched text down to the exception vectors. */
	do_final_fixups();
}
660309b315bSAneesh Kumar K.V
/*
 * Re-run the MMU feature fixups for just the feature bits in @mask, for
 * features whose final value is only known after apply_feature_fixups()
 * has already run. The saved copy is updated so check_features() does
 * not warn about the late change, and the MMU feature static keys are
 * re-initialised to match the new feature state.
 */
void __init update_mmu_feature_fixups(unsigned long mask)
{
	saved_mmu_features &= ~mask;
	saved_mmu_features |= cur_cpu_spec->mmu_features & mask;

	do_feature_fixups_mask(cur_cpu_spec->mmu_features, mask,
			       PTRRELOC(&__start___mmu_ftr_fixup),
			       PTRRELOC(&__stop___mmu_ftr_fixup));
	mmu_feature_keys_init();
}
671*6b289911SChristophe Leroy
/*
 * Switch the cpu/mmu feature checks from their compile-time defaults to
 * static keys reflecting the features discovered at boot. Must run
 * after the feature masks are final; jump_label_init() comes first.
 */
void __init setup_feature_keys(void)
{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 */
	jump_label_init();
	cpu_feature_keys_init();
	mmu_feature_keys_init();
}
6839402c684SBenjamin Herrenschmidt
/*
 * Late sanity check: the feature masks must not have changed since the
 * kernel text was patched against them, otherwise already-patched code
 * could disagree with the current feature state. Warns only; the
 * patched text itself is left as-is.
 */
static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);
698a28e46f1SMichael Ellerman
699362e7701SMichael Ellerman #ifdef CONFIG_FTR_FIXUP_SELFTEST
700362e7701SMichael Ellerman
/*
 * Report (but don't abort on) a selftest failure. Wrapped in
 * do { } while (0) so the macro behaves as a single statement and is
 * safe inside an unbraced if/else (the bare "if" form had the classic
 * dangling-else hazard).
 */
#define check(x)	\
do {	\
	if (!(x))	\
		printk("feature-fixups: test failed at line %d\n", __LINE__);	\
} while (0)
703362e7701SMichael Ellerman
/*
 * Selftest convenience wrapper: patch a feature section with an
 * all-ones mask, i.e. every mask bit participates in the match.
 */
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	return patch_feature_section_mask(value, ~0, fcur);
}
708*6b289911SChristophe Leroy
/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
/* Shared scratch fixup entry, re-initialised by each selftest below. */
static struct fixup_entry fixup;
711362e7701SMichael Ellerman
calc_offset(struct fixup_entry * entry,unsigned int * p)712ce0c6be9SNick Child static long __init calc_offset(struct fixup_entry *entry, unsigned int *p)
713362e7701SMichael Ellerman {
714362e7701SMichael Ellerman return (unsigned long)p - (unsigned long)entry;
715362e7701SMichael Ellerman }
716362e7701SMichael Ellerman
/*
 * Patch a feature section with no alternative and verify
 * patch_feature_section() nops it out exactly when (value & mask) does
 * not equal the section's value. Fixture symbols are defined in the
 * selftest assembly.
 */
static void __init test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1[];
	extern unsigned int end_ftr_fixup_test1[];
	extern unsigned int ftr_fixup_test1_orig[];
	extern unsigned int ftr_fixup_test1_expected[];
	int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1);

	/* Section covers just the second instruction word of the fixture */
	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}
747362e7701SMichael Ellerman
/*
 * Like test_basic_patching(), but the section has a one-instruction
 * alternative: when the feature doesn't match, the alternative code
 * must be copied in rather than the section being nopped out.
 */
static void __init test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2[];
	extern unsigned int end_ftr_fixup_test2[];
	extern unsigned int ftr_fixup_test2_orig[];
	extern unsigned int ftr_fixup_test2_alt[];
	extern unsigned int ftr_fixup_test2_expected[];
	int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2);

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}
780362e7701SMichael Ellerman
/*
 * The alternative is larger than the section it would replace, which
 * patch_feature_section() must reject: expect a return value of 1 and
 * the fixture text left untouched, whatever feature value is passed.
 */
static void __init test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3[];
	extern unsigned int end_ftr_fixup_test3[];
	extern unsigned int ftr_fixup_test3_orig[];
	extern unsigned int ftr_fixup_test3_alt[];
	int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3);

	/* Section is one word; alternative is two words (too big). */
	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}
806362e7701SMichael Ellerman
/*
 * The alternative is smaller than the section it replaces: the
 * remainder of the section must be nop-filled. Also exercises a
 * high-bit feature flag to catch sign/width bugs in the mask compare.
 */
static void __init test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4[];
	extern unsigned int end_ftr_fixup_test4[];
	extern unsigned int ftr_fixup_test4_orig[];
	extern unsigned int ftr_fixup_test4_alt[];
	extern unsigned int ftr_fixup_test4_expected[];
	int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4);
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	/* Section spans four words; alternative is only two. */
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}
842362e7701SMichael Ellerman
test_alternative_case_with_branch(void)843362e7701SMichael Ellerman static void test_alternative_case_with_branch(void)
844362e7701SMichael Ellerman {
845c69a48cdSDaniel Axtens extern unsigned int ftr_fixup_test5[];
846c69a48cdSDaniel Axtens extern unsigned int end_ftr_fixup_test5[];
847c69a48cdSDaniel Axtens extern unsigned int ftr_fixup_test5_expected[];
848cad0e390SMichael Ellerman int size = 4 * (end_ftr_fixup_test5 - ftr_fixup_test5);
849362e7701SMichael Ellerman
850c69a48cdSDaniel Axtens check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
851362e7701SMichael Ellerman }
852362e7701SMichael Ellerman
test_alternative_case_with_external_branch(void)853ce0c6be9SNick Child static void __init test_alternative_case_with_external_branch(void)
854362e7701SMichael Ellerman {
855c69a48cdSDaniel Axtens extern unsigned int ftr_fixup_test6[];
856c69a48cdSDaniel Axtens extern unsigned int end_ftr_fixup_test6[];
857c69a48cdSDaniel Axtens extern unsigned int ftr_fixup_test6_expected[];
858cad0e390SMichael Ellerman int size = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6);
859362e7701SMichael Ellerman
860c69a48cdSDaniel Axtens check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
861362e7701SMichael Ellerman }
862362e7701SMichael Ellerman
test_alternative_case_with_branch_to_end(void)863ce0c6be9SNick Child static void __init test_alternative_case_with_branch_to_end(void)
8646158faedSMichael Ellerman {
8656158faedSMichael Ellerman extern unsigned int ftr_fixup_test7[];
8666158faedSMichael Ellerman extern unsigned int end_ftr_fixup_test7[];
8676158faedSMichael Ellerman extern unsigned int ftr_fixup_test7_expected[];
8686158faedSMichael Ellerman int size = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7);
8696158faedSMichael Ellerman
8706158faedSMichael Ellerman check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0);
8716158faedSMichael Ellerman }
8726158faedSMichael Ellerman
test_cpu_macros(void)873ce0c6be9SNick Child static void __init test_cpu_macros(void)
874362e7701SMichael Ellerman {
875c69a48cdSDaniel Axtens extern u8 ftr_fixup_test_FTR_macros[];
876c69a48cdSDaniel Axtens extern u8 ftr_fixup_test_FTR_macros_expected[];
877c69a48cdSDaniel Axtens unsigned long size = ftr_fixup_test_FTR_macros_expected -
878c69a48cdSDaniel Axtens ftr_fixup_test_FTR_macros;
879362e7701SMichael Ellerman
880362e7701SMichael Ellerman /* The fixups have already been done for us during boot */
881c69a48cdSDaniel Axtens check(memcmp(ftr_fixup_test_FTR_macros,
882c69a48cdSDaniel Axtens ftr_fixup_test_FTR_macros_expected, size) == 0);
883362e7701SMichael Ellerman }
884362e7701SMichael Ellerman
test_fw_macros(void)885ce0c6be9SNick Child static void __init test_fw_macros(void)
886362e7701SMichael Ellerman {
887362e7701SMichael Ellerman #ifdef CONFIG_PPC64
888c69a48cdSDaniel Axtens extern u8 ftr_fixup_test_FW_FTR_macros[];
889c69a48cdSDaniel Axtens extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
890c69a48cdSDaniel Axtens unsigned long size = ftr_fixup_test_FW_FTR_macros_expected -
891c69a48cdSDaniel Axtens ftr_fixup_test_FW_FTR_macros;
892362e7701SMichael Ellerman
893362e7701SMichael Ellerman /* The fixups have already been done for us during boot */
894c69a48cdSDaniel Axtens check(memcmp(ftr_fixup_test_FW_FTR_macros,
895c69a48cdSDaniel Axtens ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
896362e7701SMichael Ellerman #endif
897362e7701SMichael Ellerman }
898362e7701SMichael Ellerman
test_lwsync_macros(void)899ce0c6be9SNick Child static void __init test_lwsync_macros(void)
9002d1b2027SKumar Gala {
901c69a48cdSDaniel Axtens extern u8 lwsync_fixup_test[];
902c69a48cdSDaniel Axtens extern u8 end_lwsync_fixup_test[];
903c69a48cdSDaniel Axtens extern u8 lwsync_fixup_test_expected_LWSYNC[];
904c69a48cdSDaniel Axtens extern u8 lwsync_fixup_test_expected_SYNC[];
905c69a48cdSDaniel Axtens unsigned long size = end_lwsync_fixup_test -
906c69a48cdSDaniel Axtens lwsync_fixup_test;
9072d1b2027SKumar Gala
9082d1b2027SKumar Gala /* The fixups have already been done for us during boot */
9092d1b2027SKumar Gala if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
910c69a48cdSDaniel Axtens check(memcmp(lwsync_fixup_test,
911c69a48cdSDaniel Axtens lwsync_fixup_test_expected_LWSYNC, size) == 0);
9122d1b2027SKumar Gala } else {
913c69a48cdSDaniel Axtens check(memcmp(lwsync_fixup_test,
914c69a48cdSDaniel Axtens lwsync_fixup_test_expected_SYNC, size) == 0);
9152d1b2027SKumar Gala }
9162d1b2027SKumar Gala }
9172d1b2027SKumar Gala
918785b79d1SJordan Niethe #ifdef CONFIG_PPC64
/*
 * Patch a section containing a prefixed (8-byte) instruction and check
 * the whole instruction is nopped as a unit (ppc64 only).
 */
static void __init test_prefix_patching(void)
{
	extern unsigned int ftr_fixup_prefix1[];
	extern unsigned int end_ftr_fixup_prefix1[];
	extern unsigned int ftr_fixup_prefix1_orig[];
	extern unsigned int ftr_fixup_prefix1_expected[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix1 - ftr_fixup_prefix1);

	/* Section covers two words: the prefix and its suffix */
	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix1 + 3);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) == 0);

	/* Value 0 doesn't match value/mask 8, so the section is patched */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_expected, size) == 0);
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) != 0);
}
939785b79d1SJordan Niethe
/*
 * Patch a section whose alternative contains a prefixed instruction and
 * check the alternative is copied in correctly (ppc64 only).
 */
static void __init test_prefix_alt_patching(void)
{
	extern unsigned int ftr_fixup_prefix2[];
	extern unsigned int end_ftr_fixup_prefix2[];
	extern unsigned int ftr_fixup_prefix2_orig[];
	extern unsigned int ftr_fixup_prefix2_expected[];
	extern unsigned int ftr_fixup_prefix2_alt[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix2 - ftr_fixup_prefix2);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix2 + 3);
	/* Two-word alternative: a whole prefixed instruction */
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix2_alt + 2);
	/* Sanity check */
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) == 0);

	/* Value 0 doesn't match, so the alternative is patched in */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_expected, size) == 0);
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) != 0);
}
961785b79d1SJordan Niethe
/*
 * Section mixing prefixed and word instructions, with a three-word
 * alternative; verify patching produces the expected image (ppc64 only).
 */
static void __init test_prefix_word_alt_patching(void)
{
	extern unsigned int ftr_fixup_prefix3[];
	extern unsigned int end_ftr_fixup_prefix3[];
	extern unsigned int ftr_fixup_prefix3_orig[];
	extern unsigned int ftr_fixup_prefix3_expected[];
	extern unsigned int ftr_fixup_prefix3_alt[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix3 - ftr_fixup_prefix3);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix3 + 4);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix3_alt + 3);
	/* Sanity check */
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) == 0);

	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_expected, size) == 0);
	/*
	 * NOTE(review): patching a second time looks intended to check
	 * idempotence (result still differs from the original) — confirm
	 * against the selftest assembly before changing.
	 */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) != 0);
}
984785b79d1SJordan Niethe #else
/* !CONFIG_PPC64: no prefixed instructions, so the prefix tests are no-ops. */
static inline void test_prefix_patching(void) {}
static inline void test_prefix_alt_patching(void) {}
static inline void test_prefix_word_alt_patching(void) {}
988785b79d1SJordan Niethe #endif /* CONFIG_PPC64 */
989785b79d1SJordan Niethe
/*
 * Run all feature-fixup selftests at late_initcall time. Failures are
 * reported via the check() macro's printk; the initcall always
 * returns 0.
 */
static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_alternative_case_with_branch_to_end();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();
	test_prefix_patching();
	test_prefix_alt_patching();
	test_prefix_word_alt_patching();

	return 0;
}
late_initcall(test_feature_fixups);
1011362e7701SMichael Ellerman
1012362e7701SMichael Ellerman #endif /* CONFIG_FTR_FIXUP_SELFTEST */
1013