// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/memblock.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/seq_buf.h>
#include <linux/debugfs.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/security_features.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/inst.h>

#include "setup.h"

u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

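/*
 * Branch predictor flush state. The count cache (indirect branch predictor)
 * and the link stack (return address predictor) are tracked separately,
 * because firmware can advertise a hardware-assisted flush for either one
 * independently (see toggle_branch_cache_flush() below).
 */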
enum branch_cache_flush_type {
	BRANCH_CACHE_FLUSH_NONE	= 0x1,
	BRANCH_CACHE_FLUSH_SW	= 0x2,
	BRANCH_CACHE_FLUSH_HW	= 0x4,
};
static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif

static void enable_barrier_nospec(bool enable)
{
	barrier_nospec_enabled = enable;
	do_barrier_nospec_fixups(enable);
}

void __init setup_barrier_nospec(void)
{
	bool enable;

	/*
	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
	 * But there's a good reason not to. The two flags we check below are
	 * both enabled by default in the kernel, so if the hcall is not
	 * functional they will be enabled.
	 * On a system where the host firmware has been updated (so the ori
	 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
	 * not been updated, we would like to enable the barrier. Dropping the
	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
	 * we potentially enable the barrier on systems where the host firmware
	 * is not updated, but that's harmless as it's a no-op.
	 */
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

	if (!no_nospec && !cpu_mitigations_off())
		enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
	no_nospec = true;

	return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);

#ifdef CONFIG_DEBUG_FS
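/*
 * Runtime toggle for the barrier, exposed under arch_debugfs_dir (i.e.
 * <debugfs>/powerpc, typically /sys/kernel/debug/powerpc). Writing 0 or 1
 * repatches the kernel; writes that don't change the current state are
 * no-ops.
 */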
static int barrier_nospec_set(void *data, u64 val)
{
	switch (val) {
	case 0:
	case 1:
		break;
	default:
		return -EINVAL;
	}

	if (!!val == !!barrier_nospec_enabled)
		return 0;

	enable_barrier_nospec(!!val);

	return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
	*val = barrier_nospec_enabled ? 1 : 0;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get,
			 barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file_unsafe("barrier_nospec", 0600,
				   arch_debugfs_dir, NULL,
				   &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);

static __init int security_feature_debugfs_init(void)
{
	debugfs_create_x64("security_features", 0400, arch_debugfs_dir,
			   &powerpc_security_features);
	return 0;
}
device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
	no_spectrev2 = true;

	return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_E500 || CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_E500
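/*
 * On e500 cores the Spectre v2 mitigation is a branch target buffer flush.
 * When the mitigation is disabled (command line or cpu_mitigations_off())
 * do_btb_flush_fixups() patches the flush sites out; otherwise they stay
 * live and btb_flush_enabled records that for the sysfs report below.
 */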
void __init setup_spectre_v2(void)
{
	if (no_spectrev2 || cpu_mitigations_off())
		do_btb_flush_fixups();
	else
		btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_E500 */

#ifdef CONFIG_PPC_BOOK3S_64
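/*
 * The cpu_show_*() functions below override the weak generic versions and
 * back the files in /sys/devices/system/cpu/vulnerabilities/.
 */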
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool thread_priv;

	thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

	if (rfi_flush) {
		struct seq_buf s;
		seq_buf_init(&s, buf, PAGE_SIZE - 1);

		seq_buf_printf(&s, "Mitigation: RFI Flush");
		if (thread_priv)
			seq_buf_printf(&s, ", L1D private per thread");

		seq_buf_printf(&s, "\n");

		return s.len;
	}

	if (thread_priv)
		return sprintf(buf, "Vulnerable: L1D private per thread\n");

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_meltdown(dev, attr, buf);
}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
		if (barrier_nospec_enabled)
			seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
		else
			seq_buf_printf(&s, "Vulnerable");

		if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
			seq_buf_printf(&s, ", ori31 speculation barrier enabled");

		seq_buf_printf(&s, "\n");
	} else
		seq_buf_printf(&s, "Not affected\n");

	return s.len;
}

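/*
 * Ordering of the spectre_v2 report: the hardware states (branch
 * serialisation and/or count cache disabled) take precedence, then the
 * software count cache flush, then the e500 BTB flush. The link stack
 * flush state is only appended when some count cache mitigation is active.
 */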
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;
	bool bcs, ccd;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

	if (bcs || ccd) {
		seq_buf_printf(&s, "Mitigation: ");

		if (bcs)
			seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

		if (bcs && ccd)
			seq_buf_printf(&s, ", ");

		if (ccd)
			seq_buf_printf(&s, "Indirect branch cache disabled");

	} else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
		seq_buf_printf(&s, "Mitigation: Software count cache flush");

		if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");

	} else if (btb_flush_enabled) {
		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
	} else {
		seq_buf_printf(&s, "Vulnerable");
	}

	if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
		if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
			seq_buf_printf(&s, ", Software link stack flush");
		if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");
	}

	seq_buf_printf(&s, "\n");

	return s.len;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
static bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
	pr_info("stf-barrier: disabled on command line.");
	no_stf_barrier = true;
	return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);

enum stf_barrier_type stf_barrier_type_get(void)
{
	return stf_enabled_flush_types;
}

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
		/* Until firmware tells us, we have the barrier with auto */
		return 0;
	} else if (strncmp(p, "off", 3) == 0) {
		handle_no_stf_barrier(NULL);
		return 0;
	} else
		return 1;

	return 0;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

static void stf_barrier_enable(bool enable)
{
	if (enable)
		do_stf_barrier_fixups(stf_enabled_flush_types);
	else
		do_stf_barrier_fixups(STF_BARRIER_NONE);

	stf_barrier = enable;
}

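/*
 * The barrier variant is chosen by ISA level: eieio on ISA v3.0 (POWER9),
 * hwsync + ori on ISA v2.07 (POWER8), and the software fallback sequence
 * on ISA v2.06 (POWER7). Older CPUs get no barrier.
 */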
void setup_stf_barrier(void)
{
	enum stf_barrier_type type;
	bool enable;

	/* Default to fallback in case fw-features are not available */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		type = STF_BARRIER_EIEIO;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		type = STF_BARRIER_SYNC_ORI;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		type = STF_BARRIER_FALLBACK;
	else
		type = STF_BARRIER_NONE;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_STF_BARRIER);

	if (type == STF_BARRIER_FALLBACK) {
		pr_info("stf-barrier: fallback barrier available\n");
	} else if (type == STF_BARRIER_SYNC_ORI) {
		pr_info("stf-barrier: hwsync barrier available\n");
	} else if (type == STF_BARRIER_EIEIO) {
		pr_info("stf-barrier: eieio barrier available\n");
	}

	stf_enabled_flush_types = type;

	if (!no_stf_barrier && !cpu_mitigations_off())
		stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
		const char *type;
		switch (stf_enabled_flush_types) {
		case STF_BARRIER_EIEIO:
			type = "eieio";
			break;
		case STF_BARRIER_SYNC_ORI:
			type = "hwsync";
			break;
		case STF_BARRIER_FALLBACK:
			type = "fallback";
			break;
		default:
			type = "unknown";
		}
		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

static int ssb_prctl_get(struct task_struct *task)
{
	/*
	 * The STF_BARRIER feature is on by default, so if it's off that means
	 * firmware has explicitly said the CPU is not vulnerable via either
	 * the hypercall or device tree.
	 */
	if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
		return PR_SPEC_NOT_AFFECTED;

	/*
	 * If the system's CPU has no known barrier (see setup_stf_barrier())
	 * then assume that the CPU is not vulnerable.
	 */
	if (stf_enabled_flush_types == STF_BARRIER_NONE)
		return PR_SPEC_NOT_AFFECTED;

	/*
	 * Otherwise the CPU is vulnerable. The barrier is not a global or
	 * per-process mitigation, so the only value that can be reported here
	 * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
	 */
	return PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != stf_barrier)
		stf_barrier_enable(enable);

	return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
	*val = stf_barrier ? 1 : 0;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set,
			 "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file_unsafe("stf_barrier", 0600, arch_debugfs_dir,
				   NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

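/*
 * The branch cache flush is wired up by runtime code patching. The call
 * sites (in _switch, and in the KVM guest exit paths) are first nop'ed out
 * and then repatched according to the configured flush types: a branch to
 * the software flush routine, or the inline li/mtctr/bcctr sequence that
 * triggers the hardware-assisted flush.
 */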
static void update_branch_cache_flush(void)
{
	u32 *site, __maybe_unused *site2;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	site = &patch__call_kvm_flush_link_stack;
	site2 = &patch__call_kvm_flush_link_stack_p9;
	// This controls the branch from guest_exit_cont to kvm_flush_link_stack
	if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
		patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
		patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
	} else {
		// Could use HW flush, but that could also flush count cache
		patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
		patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
	}
#endif

	// Patch out the bcctr first, then nop the rest
	site = &patch__call_flush_branch_caches3;
	patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
	site = &patch__call_flush_branch_caches2;
	patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
	site = &patch__call_flush_branch_caches1;
	patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));

	// This controls the branch from _switch to flush_branch_caches
	if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
	    link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
		// Nothing to be done

	} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
		   link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
		// Patch in the bcctr last
		site = &patch__call_flush_branch_caches1;
		patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
		site = &patch__call_flush_branch_caches2;
		patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
		site = &patch__call_flush_branch_caches3;
		patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));

	} else {
		patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);

		// If we just need to flush the link stack, early return
		if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
			patch_instruction_site(&patch__flush_link_stack_return,
					       ppc_inst(PPC_RAW_BLR()));

		// If we have flush instruction, early return
		} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
			patch_instruction_site(&patch__flush_count_cache_return,
					       ppc_inst(PPC_RAW_BLR()));
		}
	}
}

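/*
 * Recompute both flush types from the current security features and repatch
 * the kernel to match. Called at boot via setup_count_cache_flush() and at
 * runtime from the debugfs knobs below.
 */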
static void toggle_branch_cache_flush(bool enable)
{
	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
		if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE)
			count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;

		pr_info("count-cache-flush: flush disabled.\n");
	} else {
		if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
			count_cache_flush_type = BRANCH_CACHE_FLUSH_HW;
			pr_info("count-cache-flush: hardware flush enabled.\n");
		} else {
			count_cache_flush_type = BRANCH_CACHE_FLUSH_SW;
			pr_info("count-cache-flush: software flush enabled.\n");
		}
	}

	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) {
		if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
			link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

		pr_info("link-stack-flush: flush disabled.\n");
	} else {
		if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) {
			link_stack_flush_type = BRANCH_CACHE_FLUSH_HW;
			pr_info("link-stack-flush: hardware flush enabled.\n");
		} else {
			link_stack_flush_type = BRANCH_CACHE_FLUSH_SW;
			pr_info("link-stack-flush: software flush enabled.\n");
		}
	}

	update_branch_cache_flush();
}

void setup_count_cache_flush(void)
{
	bool enable = true;

	if (no_spectrev2 || cpu_mitigations_off()) {
		if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
		    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
			pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");

		enable = false;
	}

	/*
	 * There's no firmware feature flag/hypervisor bit to tell us we need to
	 * flush the link stack on context switch. So we set it here if we see
	 * either of the Spectre v2 mitigations that aim to protect userspace.
	 */
	if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
	    security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
		security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);

	toggle_branch_cache_flush(enable);
}

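/*
 * L1D flush state. Three independently patched flushes are managed here:
 * when returning from the kernel (rfi_flush), when entering the kernel
 * (entry_flush), and after user accesses (uaccess_flush).
 */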
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
static bool no_entry_flush;
static bool no_uaccess_flush;
bool rfi_flush;
static bool entry_flush;
static bool uaccess_flush;
DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
EXPORT_SYMBOL(uaccess_flush_key);

static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

static int __init handle_no_entry_flush(char *p)
{
	pr_info("entry-flush: disabled on command line.");
	no_entry_flush = true;
	return 0;
}
early_param("no_entry_flush", handle_no_entry_flush);

static int __init handle_no_uaccess_flush(char *p)
{
	pr_info("uaccess-flush: disabled on command line.");
	no_uaccess_flush = true;
	return 0;
}
early_param("no_uaccess_flush", handle_no_uaccess_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}

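/*
 * Note the ordering when enabling: the flush is patched in first, then
 * on_each_cpu(do_nothing, ...) forces every CPU through an interrupt
 * entry/exit, so all CPUs have executed the new flush at least once before
 * we return.
 */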
void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}

static void entry_flush_enable(bool enable)
{
	if (enable) {
		do_entry_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		do_entry_flush_fixups(L1D_FLUSH_NONE);
	}

	entry_flush = enable;
}

static void uaccess_flush_enable(bool enable)
{
	if (enable) {
		do_uaccess_flush_fixups(enabled_flush_types);
		static_branch_enable(&uaccess_flush_key);
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		static_branch_disable(&uaccess_flush_key);
		do_uaccess_flush_fixups(L1D_FLUSH_NONE);
	}

	uaccess_flush = enable;
}

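/*
 * The fallback "displacement" flush has no dedicated flush instruction:
 * the exit code instead reads a private area sized at 2x L1D so that every
 * resident cache line is displaced. Allocate that area once and point each
 * CPU's paca at it.
 */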
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns
	 * to reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
						l1d_size, MEMBLOCK_LOW_LIMIT,
						limit, NUMA_NO_NODE);
	if (!l1d_flush_fallback_area)
		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
		      __func__, l1d_size * 2, l1d_size, &limit);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}

void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!cpu_mitigations_off() && !no_rfi_flush)
		rfi_flush_enable(enable);
}

void setup_entry_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_entry_flush)
		entry_flush_enable(enable);
}

void setup_uaccess_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_uaccess_flush)
		uaccess_flush_enable(enable);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	toggle_branch_cache_flush(enable);

	return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
	if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE)
		*val = 0;
	else
		*val = 1;

	return 0;
}

static int link_stack_flush_get(void *data, u64 *val)
{
	if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE)
		*val = 0;
	else
		*val = 1;

	return 0;
}

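/*
 * link_stack_flush shares count_cache_flush_set as its setter: writing to
 * either file funnels through toggle_branch_cache_flush(), which
 * re-evaluates both flush types together.
 */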
DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
			 count_cache_flush_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_link_stack_flush, link_stack_flush_get,
			 count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
	debugfs_create_file_unsafe("count_cache_flush", 0600,
				   arch_debugfs_dir, NULL,
				   &fops_count_cache_flush);
	debugfs_create_file_unsafe("link_stack_flush", 0600,
				   arch_debugfs_dir, NULL,
				   &fops_link_stack_flush);
	return 0;
}
device_initcall(count_cache_flush_debugfs_init);

static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static int entry_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != entry_flush)
		entry_flush_enable(enable);

	return 0;
}

static int entry_flush_get(void *data, u64 *val)
{
	*val = entry_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");

static int uaccess_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != uaccess_flush)
		uaccess_flush_enable(enable);

	return 0;
}

static int uaccess_flush_get(void *data, u64 *val)
{
	*val = uaccess_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, arch_debugfs_dir, NULL, &fops_rfi_flush);
	debugfs_create_file("entry_flush", 0600, arch_debugfs_dir, NULL, &fops_entry_flush);
	debugfs_create_file("uaccess_flush", 0600, arch_debugfs_dir, NULL, &fops_uaccess_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */