// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/seq_buf.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>

unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum count_cache_flush_type {
	COUNT_CACHE_FLUSH_NONE	= 0x1,
	COUNT_CACHE_FLUSH_SW	= 0x2,
	COUNT_CACHE_FLUSH_HW	= 0x4,
};
static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#ifdef CONFIG_PPC_FSL_BOOK3E
static bool no_spectrev2;
#endif

/* Patch the barrier in or out at every fixup site and record the state */
static void enable_barrier_nospec(bool enable)
{
	barrier_nospec_enabled = enable;
	do_barrier_nospec_fixups(enable);
}

void setup_barrier_nospec(void)
{
	bool enable;

	/*
	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
	 * But there's a good reason not to. The two flags we check below are
	 * both enabled by default in the kernel, so if the hcall is not
	 * functional they will be enabled.
	 * On a system where the host firmware has been updated (so the ori
	 * functions as a barrier), but on which the hypervisor (KVM/QEMU) has
	 * not been updated, we would like to enable the barrier. Dropping the
	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
	 * we potentially enable the barrier on systems where the host firmware
	 * is not updated, but that's harmless as it's a no-op.
	 */
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

	if (!no_nospec && !cpu_mitigations_off())
		enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
	no_nospec = true;

	return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);

#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
	switch (val) {
	case 0:
	case 1:
		break;
	default:
		return -EINVAL;
	}

	if (!!val == !!barrier_nospec_enabled)
		return 0;

	enable_barrier_nospec(!!val);

	return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
	*val = barrier_nospec_enabled ? 1 : 0;
	return 0;
}
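
/*
 * The get/set pair above is wired into a debugfs file below, so the
 * mitigation can be flipped at runtime for testing. Assuming debugfs is
 * mounted at /sys/kernel/debug, the knob can be driven from userspace,
 * e.g.:
 *
 *   echo 0 > /sys/kernel/debug/powerpc/barrier_nospec    (disable)
 *   cat /sys/kernel/debug/powerpc/barrier_nospec         (read back 0 or 1)
 */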
DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
			barrier_nospec_get, barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
			    &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);

static __init int security_feature_debugfs_init(void)
{
	debugfs_create_x64("security_features", 0400, powerpc_debugfs_root,
			   (u64 *)&powerpc_security_features);
	return 0;
}
device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#ifdef CONFIG_PPC_FSL_BOOK3E
static int __init handle_nospectre_v2(char *p)
{
	no_spectrev2 = true;

	return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);

void setup_spectre_v2(void)
{
	if (no_spectrev2 || cpu_mitigations_off())
		do_btb_flush_fixups();
	else
		btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool thread_priv;

	thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

	if (rfi_flush || thread_priv) {
		struct seq_buf s;

		seq_buf_init(&s, buf, PAGE_SIZE - 1);

		seq_buf_printf(&s, "Mitigation: ");

		if (rfi_flush)
			seq_buf_printf(&s, "RFI Flush");

		if (rfi_flush && thread_priv)
			seq_buf_printf(&s, ", ");

		if (thread_priv)
			seq_buf_printf(&s, "L1D private per thread");

		seq_buf_printf(&s, "\n");

		return s.len;
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
		if (barrier_nospec_enabled)
			seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
		else
			seq_buf_printf(&s, "Vulnerable");

		if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
			seq_buf_printf(&s, ", ori31 speculation barrier enabled");

		seq_buf_printf(&s, "\n");
	} else {
		seq_buf_printf(&s, "Not affected\n");
	}

	return s.len;
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;
	bool bcs, ccd;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

	if (bcs || ccd) {
		seq_buf_printf(&s, "Mitigation: ");

		if (bcs)
			seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

		if (bcs && ccd)
			seq_buf_printf(&s, ", ");

		if (ccd)
			seq_buf_printf(&s, "Indirect branch cache disabled");
	} else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
		seq_buf_printf(&s, "Mitigation: Software count cache flush");

		if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");
	} else if (btb_flush_enabled) {
		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
	} else {
		seq_buf_printf(&s, "Vulnerable");
	}

	seq_buf_printf(&s, "\n");

	return s.len;
}
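
/*
 * The cpu_show_*() handlers in this file override the weak defaults in
 * drivers/base/cpu.c and supply the text behind the generic sysfs
 * vulnerability reporting interface, e.g.:
 *
 *   $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
 *   Mitigation: Software count cache flush
 *
 * (Sample output only; the exact string depends on the security features
 * reported by firmware.)
 */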

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
	pr_info("stf-barrier: disabled on command line.\n");
	no_stf_barrier = true;
	return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
		/* Until firmware tells us, we have the barrier with auto */
		return 0;
	} else if (strncmp(p, "off", 3) == 0) {
		handle_no_stf_barrier(NULL);
		return 0;
	}

	return 1;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

static void stf_barrier_enable(bool enable)
{
	if (enable)
		do_stf_barrier_fixups(stf_enabled_flush_types);
	else
		do_stf_barrier_fixups(STF_BARRIER_NONE);

	stf_barrier = enable;
}

void setup_stf_barrier(void)
{
	enum stf_barrier_type type;
	bool enable, hv;

	hv = cpu_has_feature(CPU_FTR_HVMODE);

	/* Default to fallback in case fw-features are not available */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		type = STF_BARRIER_EIEIO;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		type = STF_BARRIER_SYNC_ORI;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		type = STF_BARRIER_FALLBACK;
	else
		type = STF_BARRIER_NONE;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
		  (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));

	if (type == STF_BARRIER_FALLBACK) {
		pr_info("stf-barrier: fallback barrier available\n");
	} else if (type == STF_BARRIER_SYNC_ORI) {
		pr_info("stf-barrier: hwsync barrier available\n");
	} else if (type == STF_BARRIER_EIEIO) {
		pr_info("stf-barrier: eieio barrier available\n");
	}

	stf_enabled_flush_types = type;

	if (!no_stf_barrier && !cpu_mitigations_off())
		stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
		const char *type;

		switch (stf_enabled_flush_types) {
		case STF_BARRIER_EIEIO:
			type = "eieio";
			break;
		case STF_BARRIER_SYNC_ORI:
			type = "hwsync";
			break;
		case STF_BARRIER_FALLBACK:
			type = "fallback";
			break;
		default:
			type = "unknown";
			break;
		}
		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != stf_barrier)
		stf_barrier_enable(enable);

	return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
	*val = stf_barrier ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
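
/*
 * The software count cache flush is wired up by runtime code patching:
 * enabling it turns the nop at patch__call_flush_count_cache into a
 * branch-and-link to flush_count_cache, and disabling it patches the nop
 * back in. When firmware advertises a hardware flush assist
 * (SEC_FTR_BCCTR_FLUSH_ASSIST), the full software sequence is cut short
 * by patching an early blr into flush_count_cache at
 * patch__flush_count_cache_return.
 */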
static void toggle_count_cache_flush(bool enable)
{
	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
		patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
		count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
		pr_info("count-cache-flush: software flush disabled.\n");
		return;
	}

	patch_branch_site(&patch__call_flush_count_cache,
			  (u64)&flush_count_cache, BRANCH_SET_LINK);

	if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
		count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
		pr_info("count-cache-flush: full software flush sequence enabled.\n");
		return;
	}

	patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
	count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
	pr_info("count-cache-flush: hardware assisted flush sequence enabled.\n");
}

void setup_count_cache_flush(void)
{
	toggle_count_cache_flush(true);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	toggle_count_cache_flush(enable);

	return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
	if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
		*val = 0;
	else
		*val = 1;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
			count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
	debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
			    NULL, &fops_count_cache_flush);
	return 0;
}
device_initcall(count_cache_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */