xref: /openbmc/linux/arch/arm64/kvm/nested.c (revision 8957261c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 - Columbia University and Linaro Ltd.
 * Author: Jintack Lim <jintack.lim@linaro.org>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
#include <asm/sysreg.h>

#include "sys_regs.h"

/* Protection against the sysreg repainting madness... */
#define NV_FTR(r, f)		ID_AA64##r##_EL1_##f

/*
 * Our emulated CPU doesn't support all the possible features. For the
 * sake of simplicity (and probably mental sanity), wipe out a number
 * of feature bits we don't intend to support for the time being.
 * This list should get updated as new features get added to the NV
 * support, and as new extensions to the architecture appear.
 */
void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);
	u64 val, tmp;

	val = p->regval;

	switch (id) {
	case SYS_ID_AA64ISAR0_EL1:
		/* Support everything but TME, Outer Shareable and Range TLBIs */
		val &= ~(NV_FTR(ISAR0, TLB)		|
			 NV_FTR(ISAR0, TME));
		break;

	case SYS_ID_AA64ISAR1_EL1:
		/* Support everything but PtrAuth and Spec Invalidation */
		val &= ~(GENMASK_ULL(63, 56)	|
			 NV_FTR(ISAR1, SPECRES)	|
			 NV_FTR(ISAR1, GPI)	|
			 NV_FTR(ISAR1, GPA)	|
			 NV_FTR(ISAR1, API)	|
			 NV_FTR(ISAR1, APA));
		break;

	case SYS_ID_AA64PFR0_EL1:
		/* No AMU, MPAM, S-EL2, RAS or SVE */
		val &= ~(GENMASK_ULL(55, 52)	|
			 NV_FTR(PFR0, AMU)	|
			 NV_FTR(PFR0, MPAM)	|
			 NV_FTR(PFR0, SEL2)	|
			 NV_FTR(PFR0, RAS)	|
			 NV_FTR(PFR0, SVE)	|
			 NV_FTR(PFR0, EL3)	|
			 NV_FTR(PFR0, EL2)	|
			 NV_FTR(PFR0, EL1));
		/* 64bit EL1/EL2/EL3 only */
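		/* 0b0001: the exception level is implemented in AArch64 state only */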
		val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
		val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
		val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);
		break;

	case SYS_ID_AA64PFR1_EL1:
		/* Only support SSBS */
		val &= NV_FTR(PFR1, SSBS);
		break;

	case SYS_ID_AA64MMFR0_EL1:
		/* Hide ECV, ExS, Secure Memory */
		val &= ~(NV_FTR(MMFR0, ECV)		|
			 NV_FTR(MMFR0, EXS)		|
			 NV_FTR(MMFR0, TGRAN4_2)	|
			 NV_FTR(MMFR0, TGRAN16_2)	|
			 NV_FTR(MMFR0, TGRAN64_2)	|
			 NV_FTR(MMFR0, SNSMEM));

		/* Disallow unsupported S2 page sizes */
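		/* TGRANx_2 == 0b0001: granule size not supported at stage 2 */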
		switch (PAGE_SIZE) {
		case SZ_64K:
			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0001);
			fallthrough;
		case SZ_16K:
			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0001);
			fallthrough;
		case SZ_4K:
			/* Support everything */
			break;
		}
		/*
		 * Since we can't support a guest S2 page size smaller than
		 * the host's own page size (due to KVM only populating its
		 * own S2 using the kernel's page size), advertise the
		 * limitation using FEAT_GTG.
		 */
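		/* TGRANx_2 == 0b0010: granule size supported at stage 2 */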
		switch (PAGE_SIZE) {
		case SZ_4K:
			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0010);
			fallthrough;
		case SZ_16K:
			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0010);
			fallthrough;
		case SZ_64K:
			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN64_2), 0b0010);
			break;
		}
		/* Cap PARange to 48bits */
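		/* PARange == 0b0101: 48-bit physical address range */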
		tmp = FIELD_GET(NV_FTR(MMFR0, PARANGE), val);
		if (tmp > 0b0101) {
			val &= ~NV_FTR(MMFR0, PARANGE);
			val |= FIELD_PREP(NV_FTR(MMFR0, PARANGE), 0b0101);
		}
		break;

	case SYS_ID_AA64MMFR1_EL1:
		val &= (NV_FTR(MMFR1, HCX)	|
			NV_FTR(MMFR1, PAN)	|
			NV_FTR(MMFR1, LO)	|
			NV_FTR(MMFR1, HPDS)	|
			NV_FTR(MMFR1, VH)	|
			NV_FTR(MMFR1, VMIDBits));
		break;

	case SYS_ID_AA64MMFR2_EL1:
		val &= ~(NV_FTR(MMFR2, BBM)	|
			 NV_FTR(MMFR2, TTL)	|
			 GENMASK_ULL(47, 44)	|
			 NV_FTR(MMFR2, ST)	|
			 NV_FTR(MMFR2, CCIDX)	|
			 NV_FTR(MMFR2, VARange));

		/* Force TTL support */
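		/* TTL == 0b0001: FEAT_TTL (translation table level hints for TLBIs) */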
		val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001);
		break;

	case SYS_ID_AA64DFR0_EL1:
		/* Only limited support for PMU, Debug, BPs and WPs */
		val &= (NV_FTR(DFR0, PMUVer)	|
			NV_FTR(DFR0, WRPs)	|
			NV_FTR(DFR0, BRPs)	|
			NV_FTR(DFR0, DebugVer));

		/* Cap Debug to ARMv8.1 */
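		/* DebugVer == 0b0111: Armv8 debug architecture with VHE (Armv8.1) */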
		tmp = FIELD_GET(NV_FTR(DFR0, DebugVer), val);
		if (tmp > 0b0111) {
			val &= ~NV_FTR(DFR0, DebugVer);
			val |= FIELD_PREP(NV_FTR(DFR0, DebugVer), 0b0111);
		}
		break;

	default:
		/* Unknown register, just wipe it clean */
		val = 0;
		break;
	}

	p->regval = val;
}
163