1 /*
2  * Copyright 2017, Nicholas Piggin, IBM Corporation
3  * Licensed under GPLv2.
4  */
5 
6 #define pr_fmt(fmt) "dt-cpu-ftrs: " fmt
7 
8 #include <linux/export.h>
9 #include <linux/init.h>
10 #include <linux/jump_label.h>
11 #include <linux/memblock.h>
12 #include <linux/printk.h>
13 #include <linux/sched.h>
14 #include <linux/string.h>
15 #include <linux/threads.h>
16 
17 #include <asm/cputable.h>
18 #include <asm/dt_cpu_ftrs.h>
19 #include <asm/mmu.h>
20 #include <asm/oprofile_impl.h>
21 #include <asm/prom.h>
22 #include <asm/setup.h>
23 
24 
/* Device-tree visible constants follow */
/* ISA version numbers as encoded in the "isa" device tree property */
#define ISA_V2_07B      2070
#define ISA_V3_0B       3000

/* Bits of the "usable-privilege" property: which privilege levels may use it */
#define USABLE_PR               (1U << 0)
#define USABLE_OS               (1U << 1)
#define USABLE_HV               (1U << 2)

/* Known enable recipes advertised by "hv-support" / "os-support" properties */
#define HV_SUPPORT_HFSCR        (1U << 0)
#define OS_SUPPORT_FSCR         (1U << 0)

/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE		0xffffffffU
#define OS_SUPPORT_NONE		0xffffffffU
39 
/*
 * One parsed cpu-feature subnode from the ibm,powerpc-cpu-features DT node.
 * The first group mirrors the device tree properties of the same names;
 * the trailing members track parsing/enable state.
 */
struct dt_cpu_feature {
	const char *name;		/* subnode name, used to match the kernel table */
	uint32_t isa;			/* ISA version the feature first appeared in */
	uint32_t usable_privilege;	/* USABLE_{PR,OS,HV} mask */
	uint32_t hv_support;		/* HV enable recipe, or HV_SUPPORT_NONE */
	uint32_t os_support;		/* OS enable recipe, or OS_SUPPORT_NONE */
	uint32_t hfscr_bit_nr;		/* HFSCR bit to set, or -1 (all-ones) if none */
	uint32_t fscr_bit_nr;		/* FSCR bit to set, or -1 if none */
	uint32_t hwcap_bit_nr;		/* AT_HWCAP/AT_HWCAP2 bit, or -1 if none */
	/* fdt parsing */
	unsigned long node;		/* flat-DT node offset */
	int enabled;			/* set once successfully enabled */
	int disabled;			/* set once rejected/disabled */
};
54 
/*
 * Baseline kernel CPU feature bits assumed for any cpufeatures-capable CPU
 * (ISA v2.07 level); individual DT features add to or clear bits from this.
 */
#define CPU_FTRS_BASE \
	   (CPU_FTR_USE_TB | \
	    CPU_FTR_LWSYNC | \
	    CPU_FTR_FPU_UNAVAILABLE |\
	    CPU_FTR_NODSISRALIGN |\
	    CPU_FTR_NOEXECUTE |\
	    CPU_FTR_COHERENT_ICACHE | \
	    CPU_FTR_STCX_CHECKS_ADDRESS |\
	    CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	    CPU_FTR_DAWR | \
	    CPU_FTR_ARCH_206 |\
	    CPU_FTR_ARCH_207S)

/* Baseline MMU features when hash translation is enabled */
#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)

/* Baseline user-visible (AT_HWCAP/AT_HWCAP2) feature bits */
#define COMMON_USER_BASE	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
				 PPC_FEATURE_ARCH_2_06 |\
				 PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE	(PPC_FEATURE2_ARCH_2_07 | \
				 PPC_FEATURE2_ISEL)
/*
 * Set up the base CPU
 */

/* TLB flush and machine check handlers provided by cputable-era setup code */
extern void __flush_tlb_power8(unsigned int action);
extern void __flush_tlb_power9(unsigned int action);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
83 
/* Non-zero when the boot CPU runs with MSR_HV set (bare metal / hypervisor) */
static int hv_mode;

/*
 * SPR values captured at the end of boot-CPU feature setup; replayed onto
 * secondaries and after idle state loss by __restore_cpu_cpufeatures().
 */
static struct {
	u64	lpcr;
	u64	hfscr;
	u64	fscr;
} system_registers;

/* PMU register init hook selected by the performance-monitor-* features */
static void (*init_pmu_registers)(void);
93 
/*
 * Flush the whole local TLB by iterating tlbiel over every congruence-class
 * set. The number of sets is taken from the PVR since the feature device
 * tree does not (yet) describe TLB geometry.
 */
static void cpufeatures_flush_tlb(void)
{
	unsigned long rb;
	unsigned int i, num_sets;

	/*
	 * This is a temporary measure to keep equivalent TLB flush as the
	 * cputable based setup code.
	 */
	switch (PVR_VER(mfspr(SPRN_PVR))) {
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
		num_sets = POWER8_TLB_SETS;
		break;
	case PVR_POWER9:
		num_sets = POWER9_TLB_SETS_HASH;
		break;
	default:
		/* Flush at least one set so we make some progress */
		num_sets = 1;
		pr_err("unknown CPU version for boot TLB flush\n");
		break;
	}

	/* Order prior storage accesses before, and the tlbiels after */
	asm volatile("ptesync" : : : "memory");
	rb = TLBIEL_INVAL_SET;
	for (i = 0; i < num_sets; i++) {
		asm volatile("tlbiel %0" : : "r" (rb));
		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
	}
	asm volatile("ptesync" : : : "memory");
}
126 
/*
 * cpu_restore hook: re-establish the SPR state captured by
 * cpufeatures_setup_finished() on secondary bringup and after deep-idle
 * state loss.
 */
static void __restore_cpu_cpufeatures(void)
{
	/*
	 * LPCR is restored by the power on engine already. It can be changed
	 * after early init e.g., by radix enable, and we have no unified API
	 * for saving and restoring such SPRs.
	 *
	 * This ->restore hook should really be removed from idle and register
	 * restore moved directly into the idle restore code, because this code
	 * doesn't know how idle is implemented or what it needs restored here.
	 *
	 * The best we can do to accommodate secondary boot and idle restore
	 * for now is "or" LPCR with existing.
	 */

	mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
	if (hv_mode) {
		/* HV-only SPRs; LPID 0 matches cpufeatures boot setup */
		mtspr(SPRN_LPID, 0);
		mtspr(SPRN_HFSCR, system_registers.hfscr);
	}
	mtspr(SPRN_FSCR, system_registers.fscr);

	if (init_pmu_registers)
		init_pmu_registers();

	cpufeatures_flush_tlb();
}
154 
/* Buffer for the DT "display-name" property, pointed to by cpu_name below */
static char dt_cpu_name[64];

/*
 * Minimal cpu_spec installed before feature parsing; individual features
 * then fill in / adjust the fields (PMU, MCE handlers, platform, etc.).
 */
static struct cpu_spec __initdata base_cpu_spec = {
	.cpu_name		= NULL,
	.cpu_features		= CPU_FTRS_BASE,
	.cpu_user_features	= COMMON_USER_BASE,
	.cpu_user_features2	= COMMON_USER2_BASE,
	.mmu_features		= 0,
	.icache_bsize		= 32, /* minimum block size, fixed by */
	.dcache_bsize		= 32, /* cache info init.             */
	.num_pmcs		= 0,
	.pmc_type		= PPC_PMC_DEFAULT,
	.oprofile_cpu_type	= NULL,
	.oprofile_type		= PPC_OPROFILE_INVALID,
	.cpu_setup		= NULL,
	.cpu_restore		= __restore_cpu_cpufeatures,
	.flush_tlb		= NULL,
	.machine_check_early	= NULL,
	.platform		= NULL,
};
175 
/*
 * Install the base cpu_spec and put the CPU into a clean starting state
 * (facility registers cleared) before individual features are enabled.
 */
static void __init cpufeatures_setup_cpu(void)
{
	set_cur_cpu_spec(&base_cpu_spec);

	cur_cpu_spec->pvr_mask = -1;
	cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

	/* Initialize the base environment -- clear FSCR/HFSCR.  */
	hv_mode = !!(mfmsr() & MSR_HV);
	if (hv_mode) {
		/* CPU_FTR_HVMODE is used early in PACA setup */
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
		mtspr(SPRN_HFSCR, 0);
	}
	mtspr(SPRN_FSCR, 0);

	/*
	 * LPCR does not get cleared, to match behaviour with secondaries
	 * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
	 * could clear LPCR too.
	 */
}
198 
/*
 * Attempt to enable a feature not present in the kernel's match table,
 * using only the generic HFSCR/FSCR recipes the device tree describes.
 * Returns 1 on success, 0 if there is no known recipe for the feature.
 */
static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
	/* Empty first arm: HV_SUPPORT_NONE means nothing to do at HV level */
	if (f->hv_support == HV_SUPPORT_NONE) {
	} else if (f->hv_support & HV_SUPPORT_HFSCR) {
		u64 hfscr = mfspr(SPRN_HFSCR);
		hfscr |= 1UL << f->hfscr_bit_nr;
		mtspr(SPRN_HFSCR, hfscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	/* Same pattern for the OS-level FSCR facility bit */
	if (f->os_support == OS_SUPPORT_NONE) {
	} else if (f->os_support & OS_SUPPORT_FSCR) {
		u64 fscr = mfspr(SPRN_FSCR);
		fscr |= 1UL << f->fscr_bit_nr;
		mtspr(SPRN_FSCR, fscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	/* Advertise to userspace via AT_HWCAP (word 0) or AT_HWCAP2 (word 1) */
	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}
235 
236 static int __init feat_enable(struct dt_cpu_feature *f)
237 {
238 	if (f->hv_support != HV_SUPPORT_NONE) {
239 		if (f->hfscr_bit_nr != -1) {
240 			u64 hfscr = mfspr(SPRN_HFSCR);
241 			hfscr |= 1UL << f->hfscr_bit_nr;
242 			mtspr(SPRN_HFSCR, hfscr);
243 		}
244 	}
245 
246 	if (f->os_support != OS_SUPPORT_NONE) {
247 		if (f->fscr_bit_nr != -1) {
248 			u64 fscr = mfspr(SPRN_FSCR);
249 			fscr |= 1UL << f->fscr_bit_nr;
250 			mtspr(SPRN_FSCR, fscr);
251 		}
252 	}
253 
254 	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
255 		uint32_t word = f->hwcap_bit_nr / 32;
256 		uint32_t bit = f->hwcap_bit_nr % 32;
257 
258 		if (word == 0)
259 			cur_cpu_spec->cpu_user_features |= 1U << bit;
260 		else if (word == 1)
261 			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
262 		else
263 			pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
264 	}
265 
266 	return 1;
267 }
268 
/* Table hook for features the kernel deliberately refuses to enable. */
static int __init feat_disable(struct dt_cpu_feature *f)
{
	return 0;
}
273 
/*
 * Enable the "hypervisor" feature: set up HV-mode state (LPID, LPCR)
 * and record CPU_FTR_HVMODE. Rejected if the CPU is not actually in
 * HV mode (e.g. the DT was copied into a guest).
 */
static int __init feat_enable_hv(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!hv_mode) {
		pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
		return 0;
	}

	mtspr(SPRN_LPID, 0);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &=  ~LPCR_LPES0; /* HV external interrupts */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;

	return 1;
}
293 
/* Advertise true little-endian support to userspace. */
static int __init feat_enable_le(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
	return 1;
}
299 
/* Record SMT capability for the kernel and advertise it to userspace. */
static int __init feat_enable_smt(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
	return 1;
}
306 
307 static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
308 {
309 	u64 lpcr;
310 
311 	/* Set PECE wakeup modes for ISA 207 */
312 	lpcr = mfspr(SPRN_LPCR);
313 	lpcr |=  LPCR_PECE0;
314 	lpcr |=  LPCR_PECE1;
315 	lpcr |=  LPCR_PECE2;
316 	mtspr(SPRN_LPCR, lpcr);
317 
318 	return 1;
319 }
320 
/* CPU sets DSISR on alignment interrupts: clear the "no DSISR" workaround. */
static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;

	return 1;
}
327 
328 static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
329 {
330 	u64 lpcr;
331 
332 	/* Set PECE wakeup modes for ISAv3.0B */
333 	lpcr = mfspr(SPRN_LPCR);
334 	lpcr |=  LPCR_PECE0;
335 	lpcr |=  LPCR_PECE1;
336 	lpcr |=  LPCR_PECE2;
337 	mtspr(SPRN_LPCR, lpcr);
338 
339 	return 1;
340 }
341 
/*
 * Enable hash MMU (pre-v3 ISA): configure LPCR translation controls and
 * record the hash MMU feature set.
 */
static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
	u64 lpcr;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_ISL;

	/* VRMASD */
	lpcr |= LPCR_VPM0;
	lpcr &= ~LPCR_VPM1;
	lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}
360 
361 static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
362 {
363 	u64 lpcr;
364 
365 	lpcr = mfspr(SPRN_LPCR);
366 	lpcr &= ~LPCR_ISL;
367 	mtspr(SPRN_LPCR, lpcr);
368 
369 	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
370 	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
371 
372 	return 1;
373 }
374 
375 
/*
 * Enable radix MMU when the kernel is built with radix support; the hash
 * base features are also set because radix CPUs retain hash capability.
 * Returns 0 (feature disabled) on !CONFIG_PPC_RADIX_MMU kernels.
 */
static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_RADIX_MMU
	cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
#endif
	return 0;
}
387 
/*
 * Enable the DSCR facility, then program the LPCR default prefetch
 * depth field (DPFD) used when DSCR[DPFD] is zero.
 */
static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
	u64 lpcr;

	feat_enable(f);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_DPFD;
	lpcr |=  (4UL << LPCR_DPFD_SH);
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
401 
/*
 * Grant problem/privileged state access to the PMU facility.
 * NOTE(review): bit 60 appears to be the HFSCR PM (performance monitor)
 * facility bit — confirm against the ISA HFSCR field definitions.
 */
static void hfscr_pmu_enable(void)
{
	u64 hfscr = mfspr(SPRN_HFSCR);
	hfscr |= PPC_BIT(60);
	mtspr(SPRN_HFSCR, hfscr);
}
408 
/* Reset POWER8 PMU SPRs to a clean state (HV-only MMCRC/MMCRH included). */
static void init_pmu_power8(void)
{
	if (hv_mode) {
		mtspr(SPRN_MMCRC, 0);
		mtspr(SPRN_MMCRH, 0);
	}

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
	mtspr(SPRN_MMCRS, 0);
}
422 
/* Hook up POWER8 machine-check handling and TLB flush in the cpu_spec. */
static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power8";
	cur_cpu_spec->flush_tlb = __flush_tlb_power8;
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;

	return 1;
}
431 
/*
 * Enable and initialize the POWER8 PMU; registers the per-CPU PMU init
 * hook used on restore and fills in PMU-related cpu_spec fields.
 */
static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power8();
	init_pmu_registers = init_pmu_power8;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
	/* POWER8E has a PMAO erratum requiring a workaround */
	if (pvr_version_is(PVR_POWER8E))
		cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;

	cur_cpu_spec->num_pmcs		= 6;
	cur_cpu_spec->pmc_type		= PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type	= "ppc64/power8";

	return 1;
}
450 
/* Reset POWER9 PMU SPRs to a clean state (HV-only MMCRC included). */
static void init_pmu_power9(void)
{
	if (hv_mode)
		mtspr(SPRN_MMCRC, 0);

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
}
461 
/* Hook up POWER9 machine-check handling and TLB flush in the cpu_spec. */
static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power9";
	cur_cpu_spec->flush_tlb = __flush_tlb_power9;
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;

	return 1;
}
470 
/*
 * Enable and initialize the POWER9 PMU; registers the per-CPU PMU init
 * hook used on restore and fills in PMU-related cpu_spec fields.
 */
static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power9();
	init_pmu_registers = init_pmu_power9;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs		= 6;
	cur_cpu_spec->pmc_type		= PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type	= "ppc64/power9";

	return 1;
}
487 
/*
 * Enable transactional memory when the kernel is built with TM support;
 * otherwise report the feature as disabled.
 */
static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	feat_enable(f);
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
	return 1;
#endif
	return 0;
}
497 
/* Enable the FPU facility and clear the base "FPU unavailable" bit. */
static int __init feat_enable_fp(struct dt_cpu_feature *f)
{
	feat_enable(f);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;

	return 1;
}
505 
/*
 * Enable the VMX/Altivec facility when the kernel is built with Altivec
 * support; otherwise report the feature as disabled.
 */
static int __init feat_enable_vector(struct dt_cpu_feature *f)
{
#ifdef CONFIG_ALTIVEC
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
	cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;

	return 1;
#endif
	return 0;
}
518 
/*
 * Enable the VSX facility when the kernel is built with VSX support;
 * otherwise report the feature as disabled.
 */
static int __init feat_enable_vsx(struct dt_cpu_feature *f)
{
#ifdef CONFIG_VSX
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;

	return 1;
#endif
	return 0;
}
530 
/* Record availability of the PURR and SPURR utilization registers. */
static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;

	return 1;
}
537 
/*
 * Enable event-based branches, but suppress the generic hwcap
 * advertisement (see comment below for why).
 */
static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
	/*
	 * PPC_FEATURE2_EBB is enabled in PMU init code because it has
	 * historically been related to the PMU facility. This may have
	 * to be decoupled if EBB becomes more generic. For now, follow
	 * existing convention.
	 */
	f->hwcap_bit_nr = -1;
	feat_enable(f);

	return 1;
}
551 
/*
 * Enable the doorbell (processor control) facility and allow hypervisor
 * doorbells to wake the core from idle.
 */
static int __init feat_enable_dbell(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* P9 has an HFSCR for privileged state */
	feat_enable(f);

	cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;

	lpcr = mfspr(SPRN_LPCR);
	lpcr |=  LPCR_PECEDH; /* hyp doorbell wakeup */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
567 
/*
 * Enable hypervisor virtualization interrupts (HVI) in the LPCR and
 * allow them to wake the core from stop.
 */
static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * POWER9 XIVE interrupts including in OPAL XICS compatibility
	 * are always delivered as hypervisor virtualization interrupts (HVI)
	 * rather than EE.
	 *
	 * However LPES0 is not set here, in the chance that an EE does get
	 * delivered to the host somehow, the EE handler would not expect it
	 * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
	 * happen if there is a bug in interrupt controller code, or IC is
	 * misconfigured in systemsim.
	 */

	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_HVICE;	/* enable hvi interrupts */
	lpcr |= LPCR_HEIC;	/* disable ee interrupts when MSR_HV */
	lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
592 
/* Record support for large cache-inhibited pages in the MMU features. */
static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
	cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;

	return 1;
}
599 
/* One entry in the kernel's table of recognized DT feature names. */
struct dt_cpu_feature_match {
	const char *name;			/* DT subnode name to match */
	int (*enable)(struct dt_cpu_feature *f);	/* returns 1 enabled, 0 disabled */
	u64 cpu_ftr_bit_mask;			/* CPU_FTR_* bits to set when enabled */
};
605 
/*
 * Table of feature names the kernel knows how to enable. Features not
 * listed here may still be enabled generically by
 * feat_try_enable_unknown() when CPU_FEATURE_ENABLE_UNKNOWN is set.
 */
static struct dt_cpu_feature_match __initdata
		dt_cpu_feature_match_table[] = {
	{"hypervisor", feat_enable_hv, 0},
	{"big-endian", feat_enable, 0},
	{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
	{"smt", feat_enable_smt, 0},
	{"interrupt-facilities", feat_enable, 0},
	{"timer-facilities", feat_enable, 0},
	{"timer-facilities-v3", feat_enable, 0},
	{"debug-facilities", feat_enable, 0},
	{"come-from-address-register", feat_enable, CPU_FTR_CFAR},
	{"branch-tracing", feat_enable, 0},
	{"floating-point", feat_enable_fp, 0},
	{"vector", feat_enable_vector, 0},
	{"vector-scalar", feat_enable_vsx, 0},
	{"vector-scalar-v3", feat_enable, 0},
	{"decimal-floating-point", feat_enable, 0},
	{"decimal-integer", feat_enable, 0},
	{"quadword-load-store", feat_enable, 0},
	{"vector-crypto", feat_enable, 0},
	{"mmu-hash", feat_enable_mmu_hash, 0},
	{"mmu-radix", feat_enable_mmu_radix, 0},
	{"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
	{"virtual-page-class-key-protection", feat_enable, 0},
	{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
	{"transactional-memory-v3", feat_enable_tm, 0},
	{"idle-nap", feat_enable_idle_nap, 0},
	{"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
	{"idle-stop", feat_enable_idle_stop, 0},
	{"machine-check-power8", feat_enable_mce_power8, 0},
	{"performance-monitor-power8", feat_enable_pmu_power8, 0},
	{"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
	{"event-based-branch", feat_enable_ebb, 0},
	{"target-address-register", feat_enable, 0},
	{"branch-history-rolling-buffer", feat_enable, 0},
	{"control-register", feat_enable, CPU_FTR_CTRL},
	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
	{"subcore", feat_enable, CPU_FTR_SUBCORE},
	{"no-execute", feat_enable, 0},
	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
	{"coprocessor-icswx", feat_enable, CPU_FTR_ICSWX},
	{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
	{"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
	{"wait", feat_enable, 0},
	{"atomic-memory-operations", feat_enable, 0},
	{"branch-v3", feat_enable, 0},
	{"copy-paste", feat_enable, 0},
	{"decimal-floating-point-v3", feat_enable, 0},
	{"decimal-integer-v3", feat_enable, 0},
	{"fixed-point-v3", feat_enable, 0},
	{"floating-point-v3", feat_enable, 0},
	{"group-start-register", feat_enable, 0},
	{"pc-relative-addressing", feat_enable, 0},
	{"machine-check-power9", feat_enable_mce_power9, 0},
	{"performance-monitor-power9", feat_enable_pmu_power9, 0},
	{"event-based-branch-v3", feat_enable, 0},
	{"random-number-generator", feat_enable, 0},
	{"system-call-vectored", feat_disable, 0},
	{"trace-interrupt-v3", feat_enable, 0},
	{"vector-v3", feat_enable, 0},
	{"vector-binary128", feat_enable, 0},
	{"vector-binary16", feat_enable, 0},
	{"wait-v3", feat_enable, 0},
};
673 
/* XXX: how to configure this? Default + boot time? */
/* Compile-time policy: attempt the generic recipe for unrecognized features */
#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
#define CPU_FEATURE_ENABLE_UNKNOWN 1
#else
#define CPU_FEATURE_ENABLE_UNKNOWN 0
#endif
680 
681 static void __init cpufeatures_setup_start(u32 isa)
682 {
683 	pr_info("setup for ISA %d\n", isa);
684 
685 	if (isa >= 3000) {
686 		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
687 		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
688 	}
689 }
690 
691 static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
692 {
693 	const struct dt_cpu_feature_match *m;
694 	bool known = false;
695 	int i;
696 
697 	for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
698 		m = &dt_cpu_feature_match_table[i];
699 		if (!strcmp(f->name, m->name)) {
700 			known = true;
701 			if (m->enable(f))
702 				break;
703 
704 			pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
705 				f->name);
706 			return false;
707 		}
708 	}
709 
710 	if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
711 		if (!feat_try_enable_unknown(f)) {
712 			pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
713 				f->name);
714 			return false;
715 		}
716 	}
717 
718 	if (m->cpu_ftr_bit_mask)
719 		cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
720 
721 	if (known)
722 		pr_debug("enabling: %s\n", f->name);
723 	else
724 		pr_debug("enabling: %s (unknown)\n", f->name);
725 
726 	return true;
727 }
728 
/* Apply PVR-based quirks that the feature device tree cannot express. */
static __init void cpufeatures_cpu_quirks(void)
{
	int version = mfspr(SPRN_PVR);

	/*
	 * Not all quirks can be derived from the cpufeatures device tree.
	 */
	/* 0x004e01xx: POWER9 DD1 silicon needs dedicated workarounds */
	if ((version & 0xffffff00) == 0x004e0100)
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
}
739 
/*
 * Finalize feature setup: apply quirks, sanity-check HV state, snapshot
 * the SPRs for secondary/idle restore, and flush the boot CPU TLB.
 */
static void __init cpufeatures_setup_finished(void)
{
	cpufeatures_cpu_quirks();

	if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
		pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
	}

	/* Snapshot for __restore_cpu_cpufeatures() */
	system_registers.lpcr = mfspr(SPRN_LPCR);
	system_registers.hfscr = mfspr(SPRN_HFSCR);
	system_registers.fscr = mfspr(SPRN_FSCR);

	cpufeatures_flush_tlb();

	pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}
758 
759 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
760 					int depth, void *data)
761 {
762 	if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
763 	    && of_get_flat_dt_prop(node, "isa", NULL))
764 		return 1;
765 
766 	return 0;
767 }
768 
/* True once dt_cpu_ftrs_init() has committed to DT-based feature setup */
static bool __initdata using_dt_cpu_ftrs = false;

/* Query whether CPU setup is driven by the cpufeatures device tree. */
bool __init dt_cpu_ftrs_in_use(void)
{
	return using_dt_cpu_ftrs;
}
775 
/*
 * Decide whether to use DT-based CPU feature setup: verify the FDT,
 * look for a usable cpufeatures node, and if found install the base
 * cpu_spec. Returns true when DT cpu features are in use.
 */
bool __init dt_cpu_ftrs_init(void *fdt)
{
	/* Setup and verify the FDT, if it fails we just bail */
	if (!early_init_dt_verify(fdt))
		return false;

	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
		return false;

	cpufeatures_setup_cpu();

	using_dt_cpu_ftrs = true;
	return true;
}
790 
/* Scratch array of parsed feature nodes, allocated during the DT scan */
static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;
793 
794 static int __init process_cpufeatures_node(unsigned long node,
795 					  const char *uname, int i)
796 {
797 	const __be32 *prop;
798 	struct dt_cpu_feature *f;
799 	int len;
800 
801 	f = &dt_cpu_features[i];
802 	memset(f, 0, sizeof(struct dt_cpu_feature));
803 
804 	f->node = node;
805 
806 	f->name = uname;
807 
808 	prop = of_get_flat_dt_prop(node, "isa", &len);
809 	if (!prop) {
810 		pr_warn("%s: missing isa property\n", uname);
811 		return 0;
812 	}
813 	f->isa = be32_to_cpup(prop);
814 
815 	prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
816 	if (!prop) {
817 		pr_warn("%s: missing usable-privilege property", uname);
818 		return 0;
819 	}
820 	f->usable_privilege = be32_to_cpup(prop);
821 
822 	prop = of_get_flat_dt_prop(node, "hv-support", &len);
823 	if (prop)
824 		f->hv_support = be32_to_cpup(prop);
825 	else
826 		f->hv_support = HV_SUPPORT_NONE;
827 
828 	prop = of_get_flat_dt_prop(node, "os-support", &len);
829 	if (prop)
830 		f->os_support = be32_to_cpup(prop);
831 	else
832 		f->os_support = OS_SUPPORT_NONE;
833 
834 	prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
835 	if (prop)
836 		f->hfscr_bit_nr = be32_to_cpup(prop);
837 	else
838 		f->hfscr_bit_nr = -1;
839 	prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
840 	if (prop)
841 		f->fscr_bit_nr = be32_to_cpup(prop);
842 	else
843 		f->fscr_bit_nr = -1;
844 	prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
845 	if (prop)
846 		f->hwcap_bit_nr = be32_to_cpup(prop);
847 	else
848 		f->hwcap_bit_nr = -1;
849 
850 	if (f->usable_privilege & USABLE_HV) {
851 		if (!(mfmsr() & MSR_HV)) {
852 			pr_warn("%s: HV feature passed to guest\n", uname);
853 			return 0;
854 		}
855 
856 		if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
857 			pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
858 			return 0;
859 		}
860 
861 		if (f->hv_support == HV_SUPPORT_HFSCR) {
862 			if (f->hfscr_bit_nr == -1) {
863 				pr_warn("%s: missing hfscr_bit_nr\n", uname);
864 				return 0;
865 			}
866 		}
867 	} else {
868 		if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
869 			pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
870 			return 0;
871 		}
872 	}
873 
874 	if (f->usable_privilege & USABLE_OS) {
875 		if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
876 			pr_warn("%s: unwanted fscr_bit_nr\n", uname);
877 			return 0;
878 		}
879 
880 		if (f->os_support == OS_SUPPORT_FSCR) {
881 			if (f->fscr_bit_nr == -1) {
882 				pr_warn("%s: missing fscr_bit_nr\n", uname);
883 				return 0;
884 			}
885 		}
886 	} else {
887 		if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
888 			pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
889 			return 0;
890 		}
891 	}
892 
893 	if (!(f->usable_privilege & USABLE_PR)) {
894 		if (f->hwcap_bit_nr != -1) {
895 			pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
896 			return 0;
897 		}
898 	}
899 
900 	/* Do all the independent features in the first pass */
901 	if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
902 		if (cpufeatures_process_feature(f))
903 			f->enabled = 1;
904 		else
905 			f->disabled = 1;
906 	}
907 
908 	return 0;
909 }
910 
911 static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
912 {
913 	const __be32 *prop;
914 	int len;
915 	int nr_deps;
916 	int i;
917 
918 	if (f->enabled || f->disabled)
919 		return;
920 
921 	prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
922 	if (!prop) {
923 		pr_warn("%s: missing dependencies property", f->name);
924 		return;
925 	}
926 
927 	nr_deps = len / sizeof(int);
928 
929 	for (i = 0; i < nr_deps; i++) {
930 		unsigned long phandle = be32_to_cpu(prop[i]);
931 		int j;
932 
933 		for (j = 0; j < nr_dt_cpu_features; j++) {
934 			struct dt_cpu_feature *d = &dt_cpu_features[j];
935 
936 			if (of_get_flat_dt_phandle(d->node) == phandle) {
937 				cpufeatures_deps_enable(d);
938 				if (d->disabled) {
939 					f->disabled = 1;
940 					return;
941 				}
942 			}
943 		}
944 	}
945 
946 	if (cpufeatures_process_feature(f))
947 		f->enabled = 1;
948 	else
949 		f->disabled = 1;
950 }
951 
/*
 * Subnode scan callback (second pass): parse each feature node into the
 * next slot of dt_cpu_features, counting via *data.
 */
static int __init scan_cpufeatures_subnodes(unsigned long node,
					  const char *uname,
					  void *data)
{
	int *count = data;

	process_cpufeatures_node(node, uname, *count);

	(*count)++;

	return 0;
}
964 
965 static int __init count_cpufeatures_subnodes(unsigned long node,
966 					  const char *uname,
967 					  void *data)
968 {
969 	int *count = data;
970 
971 	(*count)++;
972 
973 	return 0;
974 }
975 
/*
 * Main of_scan_flat_dt() callback: find the ibm,powerpc-cpu-features
 * node, parse and enable all feature subnodes (independent ones first,
 * then the dependency graph), pick up the display name, and finalize
 * the cpu_spec. The scratch feature array is freed before returning.
 */
static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
					    *uname, int depth, void *data)
{
	const __be32 *prop;
	int count, i;
	u32 isa;

	/* We are scanning "ibm,powerpc-cpu-features" nodes only */
	if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
		return 0;

	prop = of_get_flat_dt_prop(node, "isa", NULL);
	if (!prop)
		/* We checked before, "can't happen" */
		return 0;

	isa = be32_to_cpup(prop);

	/* Count and allocate space for cpu features */
	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
						&nr_dt_cpu_features);
	/* NOTE(review): memblock_alloc failure is not checked here — this
	 * runs too early to recover, but confirm panic-on-failure semantics.
	 */
	dt_cpu_features = __va(
		memblock_alloc(sizeof(struct dt_cpu_feature)*
				nr_dt_cpu_features, PAGE_SIZE));

	cpufeatures_setup_start(isa);

	/* Scan nodes into dt_cpu_features and enable those without deps  */
	count = 0;
	of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

	/* Recursive enable remaining features with dependencies */
	for (i = 0; i < nr_dt_cpu_features; i++) {
		struct dt_cpu_feature *f = &dt_cpu_features[i];

		cpufeatures_deps_enable(f);
	}

	/* Use the DT-provided CPU name when one is given */
	prop = of_get_flat_dt_prop(node, "display-name", NULL);
	if (prop && strlen((char *)prop) != 0) {
		strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
		cur_cpu_spec->cpu_name = dt_cpu_name;
	}

	cpufeatures_setup_finished();

	/* The parsed feature array is only needed during this scan */
	memblock_free(__pa(dt_cpu_features),
			sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);

	return 0;
}
1027 
/* Entry point: scan the flattened device tree for CPU feature nodes. */
void __init dt_cpu_ftrs_scan(void)
{
	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}
1032