// SPDX-License-Identifier: GPL-2.0
/*
 * Check for KVM_GET_REG_LIST regressions.
 *
 * Copyright (C) 2020, Red Hat, Inc.
 *
 * When attempting to migrate from a host with an older kernel to a host
 * with a newer kernel we allow the newer kernel on the destination to
 * list new registers with get-reg-list. We assume they'll be unused, at
 * least until the guest reboots, and so they're relatively harmless.
 * However, if the destination host with the newer kernel is missing
 * registers which the source host with the older kernel has, then that's
 * a regression in get-reg-list. This test checks for that regression by
 * checking the current list against a blessed list. We should never have
 * missing registers, but if new ones appear then they can probably be
 * added to the blessed list. A completely new blessed list can be created
 * by running the test with the --list command line argument.
 *
 * Note, the blessed list should be created from the oldest possible
 * kernel. We can't go older than v4.15, though, because that's the first
 * release to expose the ID system registers in KVM_GET_REG_LIST, see
 * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features
 * from guests"). Also, one must use the --core-reg-fixup command line
 * option when running on an older kernel that doesn't include df205b5c6328
 * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST")
 */
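/*
 * Example invocations (the binary name below is an assumption based on the
 * usual selftests build; adjust to the local build output):
 *
 *   ./get-reg-list                          # test all vcpu configs
 *   ./get-reg-list --config=sve --list      # print the 'sve' config's list
 *   ./get-reg-list --core-reg-fixup         # for pre-df205b5c6328 kernels
 */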
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

static struct kvm_reg_list *reg_list;
static __u64 *blessed_reg, blessed_n;

struct reg_sublist {
	const char *name;
	long capability;
	int feature;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
};

struct feature_id_reg {
	__u64 reg;		/* register whose presence depends on a CPU feature */
	__u64 id_reg;		/* ID register holding the gating feature field */
	__u64 feat_shift;	/* shift of the 4-bit feature field in id_reg */
	__u64 feat_min;		/* minimum field value for 'reg' to be present */
};
57 
58 static struct feature_id_reg feat_id_regs[] = {
59 	{
60 		ARM64_SYS_REG(3, 0, 2, 0, 3),	/* TCR2_EL1 */
61 		ARM64_SYS_REG(3, 0, 0, 7, 3),	/* ID_AA64MMFR3_EL1 */
62 		0,
63 		1
64 	},
65 	{
66 		ARM64_SYS_REG(3, 0, 10, 2, 2),	/* PIRE0_EL1 */
67 		ARM64_SYS_REG(3, 0, 0, 7, 3),	/* ID_AA64MMFR3_EL1 */
68 		4,
69 		1
70 	},
71 	{
72 		ARM64_SYS_REG(3, 0, 10, 2, 3),	/* PIR_EL1 */
73 		ARM64_SYS_REG(3, 0, 0, 7, 3),	/* ID_AA64MMFR3_EL1 */
74 		4,
75 		1
76 	}
77 };

struct vcpu_config {
	char *name;
	struct reg_sublist sublists[];
};

static struct vcpu_config *vcpu_configs[];
static int vcpu_configs_n;

#define for_each_sublist(c, s)							\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define for_each_reg(i)								\
	for ((i) = 0; (i) < reg_list->n; ++(i))

#define for_each_reg_filtered(i)						\
	for_each_reg(i)								\
		if (!filter_reg(reg_list->reg[i]))

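/*
 * "Missing" registers are those in the blessed list that KVM does not
 * currently report, provided the host actually supports the feature that
 * exposes them (see check_supported_feat_reg()).
 */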
#define for_each_missing_reg(i)							\
	for ((i) = 0; (i) < blessed_n; ++(i))					\
		if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))	\
			if (check_supported_feat_reg(vcpu, blessed_reg[i]))

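/*
 * "New" registers are those KVM currently reports (ignoring filtered ones)
 * that are not yet in the blessed list.
 */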
#define for_each_new_reg(i)							\
	for_each_reg_filtered(i)						\
		if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))

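/*
 * A config's name is the '+'-joined list of its sublist names with the
 * "base" sublist omitted, e.g. base+sve+pmu is reported as "sve+pmu".
 */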
static const char *config_name(struct vcpu_config *c)
{
	struct reg_sublist *s;
	int len = 0;

	if (c->name)
		return c->name;

	for_each_sublist(c, s)
		len += strlen(s->name) + 1;

	c->name = malloc(len);

	len = 0;
	for_each_sublist(c, s) {
		if (!strcmp(s->name, "base"))
			continue;
		/* the malloc()ed buffer is uninitialized, so strcpy(), not strcat() */
		strcpy(c->name + len, s->name);
		len += strlen(s->name) + 1;
		c->name[len - 1] = '+';
	}
	c->name[len - 1] = '\0';

	return c->name;
}

static bool has_cap(struct vcpu_config *c, long capability)
{
	struct reg_sublist *s;

	for_each_sublist(c, s)
		if (s->capability == capability)
			return true;
	return false;
}

static bool filter_reg(__u64 reg)
{
	/*
	 * DEMUX register presence depends on the host's CLIDR_EL1.
	 * This means there's no set of them that we can bless.
	 */
	if ((reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return true;

	return false;
}

static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
{
	int i;

	for (i = 0; i < nr_regs; ++i)
		if (reg == regs[i])
			return true;
	return false;
}

static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
	int i, ret;
	__u64 data, feat_val;

	for (i = 0; i < ARRAY_SIZE(feat_id_regs); i++) {
		if (feat_id_regs[i].reg == reg) {
			ret = __vcpu_get_reg(vcpu, feat_id_regs[i].id_reg, &data);
			if (ret < 0)
				return false;

			feat_val = ((data >> feat_id_regs[i].feat_shift) & 0xf);
			return feat_val >= feat_id_regs[i].feat_min;
		}
	}

	return true;
}

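/*
 * Expand a "##" placeholder in @template with the decimal value of @index,
 * e.g. "regs.regs[##]" with index 5 becomes "regs.regs[5]". The returned
 * string is heap-allocated and never freed, which is fine for a short-lived
 * test.
 */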
static const char *str_with_index(const char *template, __u64 index)
{
	char *str, *p;
	int n;

	str = strdup(template);
	p = strstr(str, "##");
	n = sprintf(p, "%lld", index);
	strcat(p + n, strstr(template, "##") + 2);

	return (const char *)str;
}

#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)

#define CORE_REGS_XX_NR_WORDS	2
#define CORE_SPSR_XX_NR_WORDS	2
#define CORE_FPREGS_XX_NR_WORDS	4
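/*
 * KVM_REG_ARM_CORE_REG() offsets index struct kvm_regs in 32-bit words, so
 * consecutive 64-bit core registers are 2 words apart and the 128-bit
 * fp_regs.vregs[] entries are 4 words apart; the *_NR_WORDS constants above
 * convert an offset delta back into a register index.
 */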

static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
{
	__u64 core_off = id & ~REG_MASK, idx;

	/*
	 * core_off is the offset into struct kvm_regs
	 */
	switch (core_off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", config_name(c), idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx);
	case KVM_REG_ARM_CORE_REG(regs.sp):
		return "KVM_REG_ARM_CORE_REG(regs.sp)";
	case KVM_REG_ARM_CORE_REG(regs.pc):
		return "KVM_REG_ARM_CORE_REG(regs.pc)";
	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return "KVM_REG_ARM_CORE_REG(regs.pstate)";
	case KVM_REG_ARM_CORE_REG(sp_el1):
		return "KVM_REG_ARM_CORE_REG(sp_el1)";
	case KVM_REG_ARM_CORE_REG(elr_el1):
		return "KVM_REG_ARM_CORE_REG(elr_el1)";
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
		TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", config_name(c), idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", config_name(c), idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
	}

	TEST_FAIL("%s: Unknown core reg id: 0x%llx", config_name(c), id);
	return NULL;
}

static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
{
	__u64 sve_off, n, i;

	if (id == KVM_REG_ARM64_SVE_VLS)
		return "KVM_REG_ARM64_SVE_VLS";

	sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
	i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);

	TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", config_name(c), id);

	switch (sve_off) {
	case KVM_REG_ARM64_SVE_ZREG_BASE ...
	     KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
			    "%s: Unexpected bits set in SVE ZREG id: 0x%llx", config_name(c), id);
		return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n);
	case KVM_REG_ARM64_SVE_PREG_BASE ...
	     KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
			    "%s: Unexpected bits set in SVE PREG id: 0x%llx", config_name(c), id);
		return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n);
	case KVM_REG_ARM64_SVE_FFR_BASE:
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
			    "%s: Unexpected bits set in SVE FFR id: 0x%llx", config_name(c), id);
		return "KVM_REG_ARM64_SVE_FFR(0)";
	}

	return NULL;
}

static void print_reg(struct vcpu_config *c, __u64 id)
{
	unsigned op0, op1, crn, crm, op2;
	const char *reg_size = NULL;

	TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
		    "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", config_name(c), id);

	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U8:
		reg_size = "KVM_REG_SIZE_U8";
		break;
	case KVM_REG_SIZE_U16:
		reg_size = "KVM_REG_SIZE_U16";
		break;
	case KVM_REG_SIZE_U32:
		reg_size = "KVM_REG_SIZE_U32";
		break;
	case KVM_REG_SIZE_U64:
		reg_size = "KVM_REG_SIZE_U64";
		break;
	case KVM_REG_SIZE_U128:
		reg_size = "KVM_REG_SIZE_U128";
		break;
	case KVM_REG_SIZE_U256:
		reg_size = "KVM_REG_SIZE_U256";
		break;
	case KVM_REG_SIZE_U512:
		reg_size = "KVM_REG_SIZE_U512";
		break;
	case KVM_REG_SIZE_U1024:
		reg_size = "KVM_REG_SIZE_U1024";
		break;
	case KVM_REG_SIZE_U2048:
		reg_size = "KVM_REG_SIZE_U2048";
		break;
	default:
		TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
			  config_name(c), (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
	}

	switch (id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(c, id));
		break;
	case KVM_REG_ARM_DEMUX:
		TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
			    "%s: Unexpected bits set in DEMUX reg id: 0x%llx", config_name(c), id);
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
		       reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
		break;
	case KVM_REG_ARM64_SYSREG:
		op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
		op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
		crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
		crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
		op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
		TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
			    "%s: Unexpected bits set in SYSREG reg id: 0x%llx", config_name(c), id);
		printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
		break;
	case KVM_REG_ARM_FW:
		TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
			    "%s: Unexpected bits set in FW reg id: 0x%llx", config_name(c), id);
		printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM_FW_FEAT_BMAP:
		TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
			    "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", config_name(c), id);
		printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM64_SVE:
		if (has_cap(c, KVM_CAP_ARM_SVE))
			printf("\t%s,\n", sve_id_to_str(c, id));
		else
			TEST_FAIL("%s: KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", config_name(c), id);
		break;
	default:
		TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
			  config_name(c), (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
	}
}

/*
 * Older kernels listed each 32-bit word of CORE registers separately.
 * For 64 and 128-bit registers we need to ignore the extra words. We
 * also need to fixup the sizes, because the older kernels stated all
 * registers were 64-bit, even when they weren't.
 */
static void core_reg_fixup(void)
{
	struct kvm_reg_list *tmp;
	__u64 id, core_off;
	int i;

	tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64));

	for (i = 0; i < reg_list->n; ++i) {
		id = reg_list->reg[i];

		if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) {
			tmp->reg[tmp->n++] = id;
			continue;
		}

		core_off = id & ~REG_MASK;

		switch (core_off) {
		case 0x52: case 0xd2: case 0xd6:
			/*
			 * These offsets are pointing at padding.
			 * We need to ignore them too.
			 */
			continue;
		case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
		     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
			if (core_off & 3)
				continue;
			id &= ~KVM_REG_SIZE_MASK;
			id |= KVM_REG_SIZE_U128;
			tmp->reg[tmp->n++] = id;
			continue;
		case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
			id &= ~KVM_REG_SIZE_MASK;
			id |= KVM_REG_SIZE_U32;
			tmp->reg[tmp->n++] = id;
			continue;
		default:
			if (core_off & 1)
				continue;
			tmp->reg[tmp->n++] = id;
			break;
		}
	}

	free(reg_list);
	reg_list = tmp;
}

static void prepare_vcpu_init(struct vcpu_config *c, struct kvm_vcpu_init *init)
{
	struct reg_sublist *s;

	for_each_sublist(c, s)
		if (s->capability)
			init->features[s->feature / 32] |= 1 << (s->feature % 32);
}

static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c)
{
	struct reg_sublist *s;
	int feature;

	for_each_sublist(c, s) {
		if (s->finalize) {
			feature = s->feature;
			vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
		}
	}
}

static void check_supported(struct vcpu_config *c)
{
	struct reg_sublist *s;

	for_each_sublist(c, s) {
		if (!s->capability)
			continue;

		__TEST_REQUIRE(kvm_has_cap(s->capability),
			       "%s: %s not available, skipping tests\n",
			       config_name(c), s->name);
	}
}

static bool print_list;
static bool print_filtered;
static bool fixup_core_regs;

static void run_test(struct vcpu_config *c)
{
	struct kvm_vcpu_init init = { .target = -1, };
	int new_regs = 0, missing_regs = 0, i, n;
	int failed_get = 0, failed_set = 0, failed_reject = 0;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct reg_sublist *s;

	check_supported(c);

	vm = vm_create_barebones();
	prepare_vcpu_init(c, &init);
	vcpu = __vm_vcpu_add(vm, 0);
	aarch64_vcpu_setup(vcpu, &init);
	finalize_vcpu(vcpu, c);

	reg_list = vcpu_get_reg_list(vcpu);

	if (fixup_core_regs)
		core_reg_fixup();

	if (print_list || print_filtered) {
		putchar('\n');
		for_each_reg(i) {
			__u64 id = reg_list->reg[i];
			if ((print_list && !filter_reg(id)) ||
			    (print_filtered && filter_reg(id)))
				print_reg(c, id);
		}
		putchar('\n');
		return;
	}

	/*
	 * We only test that we can get the register and then write back the
	 * same value. Some registers may allow other values to be written
	 * back, but others only allow some bits to be changed, and at least
	 * for ID registers set will fail if the value does not exactly match
	 * what was returned by get. If registers that allow other values to
	 * be written need to have the other values tested, then we should
	 * create a new set of tests for those in a new independent test
	 * executable.
	 */
	for_each_reg(i) {
		uint8_t addr[2048 / 8];
		struct kvm_one_reg reg = {
			.id = reg_list->reg[i],
			.addr = (__u64)&addr,
		};
		bool reject_reg = false;
		int ret;

		ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
		if (ret) {
			printf("%s: Failed to get ", config_name(c));
			print_reg(c, reg.id);
			putchar('\n');
			++failed_get;
		}

		/* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
		for_each_sublist(c, s) {
			if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
				reject_reg = true;
				ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
				if (ret != -1 || errno != EPERM) {
					printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
					print_reg(c, reg.id);
					putchar('\n');
					++failed_reject;
				}
				break;
			}
		}

		if (!reject_reg) {
			ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
			if (ret) {
				printf("%s: Failed to set ", config_name(c));
				print_reg(c, reg.id);
				putchar('\n');
				++failed_set;
			}
		}
	}

	for_each_sublist(c, s)
		blessed_n += s->regs_n;
	blessed_reg = calloc(blessed_n, sizeof(__u64));

	n = 0;
	for_each_sublist(c, s) {
		for (i = 0; i < s->regs_n; ++i)
			blessed_reg[n++] = s->regs[i];
	}

	for_each_new_reg(i)
		++new_regs;

	for_each_missing_reg(i)
		++missing_regs;

	if (new_regs || missing_regs) {
		n = 0;
		for_each_reg_filtered(i)
			++n;

		printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
		printf("%s: Number registers:         %5lld (includes %lld filtered registers)\n",
		       config_name(c), reg_list->n, reg_list->n - n);
	}

	if (new_regs) {
		printf("\n%s: There are %d new registers.\n"
		       "Consider adding them to the blessed reg "
		       "list with the following lines:\n\n", config_name(c), new_regs);
		for_each_new_reg(i)
			print_reg(c, reg_list->reg[i]);
		putchar('\n');
	}

	if (missing_regs) {
		printf("\n%s: There are %d missing registers.\n"
		       "The following lines are missing registers:\n\n", config_name(c), missing_regs);
		for_each_missing_reg(i)
			print_reg(c, blessed_reg[i]);
		putchar('\n');
	}

	TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
		    "%s: There are %d missing registers; "
		    "%d registers failed get; %d registers failed set; %d registers failed reject",
		    config_name(c), missing_regs, failed_get, failed_set, failed_reject);

	pr_info("%s: PASS\n", config_name(c));
	blessed_n = 0;
	free(blessed_reg);
	free(reg_list);
	kvm_vm_free(vm);
}

static void help(void)
{
	struct vcpu_config *c;
	int i;

	printf(
	"\n"
	"usage: get-reg-list [--config=<selection>] [--list] [--list-filtered] [--core-reg-fixup]\n\n"
	" --config=<selection>        Used to select a specific vcpu configuration for the test/listing\n"
	"                             '<selection>' may be\n");

	for (i = 0; i < vcpu_configs_n; ++i) {
		c = vcpu_configs[i];
		printf(
	"                               '%s'\n", config_name(c));
	}

	printf(
	"\n"
	" --list                      Print the register list rather than test it (requires --config)\n"
	" --list-filtered             Print registers that would normally be filtered out (requires --config)\n"
	" --core-reg-fixup            Needed when running on old kernels with broken core reg listings\n"
	"\n"
	);
}

static struct vcpu_config *parse_config(const char *config)
{
	struct vcpu_config *c;
	int i;

	if (config[8] != '=')
		help(), exit(1);

	for (i = 0; i < vcpu_configs_n; ++i) {
		c = vcpu_configs[i];
		if (strcmp(config_name(c), &config[9]) == 0)
			break;
	}

	if (i == vcpu_configs_n)
		help(), exit(1);

	return c;
}

int main(int ac, char **av)
{
	struct vcpu_config *c, *sel = NULL;
	int i, ret = 0;
	pid_t pid;

	for (i = 1; i < ac; ++i) {
		if (strcmp(av[i], "--core-reg-fixup") == 0)
			fixup_core_regs = true;
		else if (strncmp(av[i], "--config", 8) == 0)
			sel = parse_config(av[i]);
		else if (strcmp(av[i], "--list") == 0)
			print_list = true;
		else if (strcmp(av[i], "--list-filtered") == 0)
			print_filtered = true;
		else if (strcmp(av[i], "--help") == 0 || strcmp(av[i], "-h") == 0)
			help(), exit(0);
		else
			help(), exit(1);
	}

	if (print_list || print_filtered) {
		/*
		 * We only want to print the register list of a single config.
		 */
		if (!sel)
			help(), exit(1);
	}

	for (i = 0; i < vcpu_configs_n; ++i) {
		c = vcpu_configs[i];
		if (sel && c != sel)
			continue;

		pid = fork();

		if (!pid) {
			run_test(c);
			exit(0);
		} else {
			int wstatus;
			pid_t wpid = wait(&wstatus);
			TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
			if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
				ret = KSFT_FAIL;
		}
	}

	return ret;
}

/*
 * The current blessed list was primed with the output of kernel version
 * v4.15 with --core-reg-fixup and then later updated with new registers.
 *
 * The blessed list is up to date with kernel version v6.4 (or so we hope)
 */
static __u64 base_regs[] = {
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
	KVM_REG_ARM_FW_REG(0),		/* KVM_REG_ARM_PSCI_VERSION */
	KVM_REG_ARM_FW_REG(1),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
	KVM_REG_ARM_FW_REG(2),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
	KVM_REG_ARM_FW_REG(3),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(0),	/* KVM_REG_ARM_STD_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(1),	/* KVM_REG_ARM_STD_HYP_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(2),	/* KVM_REG_ARM_VENDOR_HYP_BMAP */
	ARM64_SYS_REG(3, 3, 14, 3, 1),	/* CNTV_CTL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 3, 2),	/* CNTV_CVAL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 0, 2),
	ARM64_SYS_REG(3, 0, 0, 0, 0),	/* MIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 0, 6),	/* REVIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 1),	/* CLIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 7),	/* AIDR_EL1 */
	ARM64_SYS_REG(3, 3, 0, 0, 1),	/* CTR_EL0 */
	ARM64_SYS_REG(2, 0, 0, 0, 4),
	ARM64_SYS_REG(2, 0, 0, 0, 5),
	ARM64_SYS_REG(2, 0, 0, 0, 6),
	ARM64_SYS_REG(2, 0, 0, 0, 7),
	ARM64_SYS_REG(2, 0, 0, 1, 4),
	ARM64_SYS_REG(2, 0, 0, 1, 5),
	ARM64_SYS_REG(2, 0, 0, 1, 6),
	ARM64_SYS_REG(2, 0, 0, 1, 7),
	ARM64_SYS_REG(2, 0, 0, 2, 0),	/* MDCCINT_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 2),	/* MDSCR_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 4),
	ARM64_SYS_REG(2, 0, 0, 2, 5),
	ARM64_SYS_REG(2, 0, 0, 2, 6),
	ARM64_SYS_REG(2, 0, 0, 2, 7),
	ARM64_SYS_REG(2, 0, 0, 3, 4),
	ARM64_SYS_REG(2, 0, 0, 3, 5),
	ARM64_SYS_REG(2, 0, 0, 3, 6),
	ARM64_SYS_REG(2, 0, 0, 3, 7),
	ARM64_SYS_REG(2, 0, 0, 4, 4),
	ARM64_SYS_REG(2, 0, 0, 4, 5),
	ARM64_SYS_REG(2, 0, 0, 4, 6),
	ARM64_SYS_REG(2, 0, 0, 4, 7),
	ARM64_SYS_REG(2, 0, 0, 5, 4),
	ARM64_SYS_REG(2, 0, 0, 5, 5),
	ARM64_SYS_REG(2, 0, 0, 5, 6),
	ARM64_SYS_REG(2, 0, 0, 5, 7),
	ARM64_SYS_REG(2, 0, 0, 6, 4),
	ARM64_SYS_REG(2, 0, 0, 6, 5),
	ARM64_SYS_REG(2, 0, 0, 6, 6),
	ARM64_SYS_REG(2, 0, 0, 6, 7),
	ARM64_SYS_REG(2, 0, 0, 7, 4),
	ARM64_SYS_REG(2, 0, 0, 7, 5),
	ARM64_SYS_REG(2, 0, 0, 7, 6),
	ARM64_SYS_REG(2, 0, 0, 7, 7),
	ARM64_SYS_REG(2, 0, 0, 8, 4),
	ARM64_SYS_REG(2, 0, 0, 8, 5),
	ARM64_SYS_REG(2, 0, 0, 8, 6),
	ARM64_SYS_REG(2, 0, 0, 8, 7),
	ARM64_SYS_REG(2, 0, 0, 9, 4),
	ARM64_SYS_REG(2, 0, 0, 9, 5),
	ARM64_SYS_REG(2, 0, 0, 9, 6),
	ARM64_SYS_REG(2, 0, 0, 9, 7),
	ARM64_SYS_REG(2, 0, 0, 10, 4),
	ARM64_SYS_REG(2, 0, 0, 10, 5),
	ARM64_SYS_REG(2, 0, 0, 10, 6),
	ARM64_SYS_REG(2, 0, 0, 10, 7),
	ARM64_SYS_REG(2, 0, 0, 11, 4),
	ARM64_SYS_REG(2, 0, 0, 11, 5),
	ARM64_SYS_REG(2, 0, 0, 11, 6),
	ARM64_SYS_REG(2, 0, 0, 11, 7),
	ARM64_SYS_REG(2, 0, 0, 12, 4),
	ARM64_SYS_REG(2, 0, 0, 12, 5),
	ARM64_SYS_REG(2, 0, 0, 12, 6),
	ARM64_SYS_REG(2, 0, 0, 12, 7),
	ARM64_SYS_REG(2, 0, 0, 13, 4),
	ARM64_SYS_REG(2, 0, 0, 13, 5),
	ARM64_SYS_REG(2, 0, 0, 13, 6),
	ARM64_SYS_REG(2, 0, 0, 13, 7),
	ARM64_SYS_REG(2, 0, 0, 14, 4),
	ARM64_SYS_REG(2, 0, 0, 14, 5),
	ARM64_SYS_REG(2, 0, 0, 14, 6),
	ARM64_SYS_REG(2, 0, 0, 14, 7),
	ARM64_SYS_REG(2, 0, 0, 15, 4),
	ARM64_SYS_REG(2, 0, 0, 15, 5),
	ARM64_SYS_REG(2, 0, 0, 15, 6),
	ARM64_SYS_REG(2, 0, 0, 15, 7),
	ARM64_SYS_REG(2, 0, 1, 1, 4),	/* OSLSR_EL1 */
	ARM64_SYS_REG(2, 4, 0, 7, 0),	/* DBGVCR32_EL2 */
	ARM64_SYS_REG(3, 0, 0, 0, 5),	/* MPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 0),	/* ID_PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 1),	/* ID_PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 2),	/* ID_DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 3),	/* ID_AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 4),	/* ID_MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 5),	/* ID_MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 6),	/* ID_MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 7),	/* ID_MMFR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 0),	/* ID_ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 1),	/* ID_ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 2),	/* ID_ISAR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 3),	/* ID_ISAR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 4),	/* ID_ISAR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 5),	/* ID_ISAR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 6),	/* ID_MMFR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 7),	/* ID_ISAR6_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 0),	/* MVFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 1),	/* MVFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 2),	/* MVFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 3),
	ARM64_SYS_REG(3, 0, 0, 3, 4),	/* ID_PFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 5),	/* ID_DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 6),	/* ID_MMFR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 7),
	ARM64_SYS_REG(3, 0, 0, 4, 0),	/* ID_AA64PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 1),	/* ID_AA64PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 2),	/* ID_AA64PFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 3),
	ARM64_SYS_REG(3, 0, 0, 4, 4),	/* ID_AA64ZFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 5),	/* ID_AA64SMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 6),
	ARM64_SYS_REG(3, 0, 0, 4, 7),
	ARM64_SYS_REG(3, 0, 0, 5, 0),	/* ID_AA64DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 1),	/* ID_AA64DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 2),
	ARM64_SYS_REG(3, 0, 0, 5, 3),
	ARM64_SYS_REG(3, 0, 0, 5, 4),	/* ID_AA64AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 5),	/* ID_AA64AFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 6),
	ARM64_SYS_REG(3, 0, 0, 5, 7),
	ARM64_SYS_REG(3, 0, 0, 6, 0),	/* ID_AA64ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 1),	/* ID_AA64ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 2),	/* ID_AA64ISAR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 3),
	ARM64_SYS_REG(3, 0, 0, 6, 4),
	ARM64_SYS_REG(3, 0, 0, 6, 5),
	ARM64_SYS_REG(3, 0, 0, 6, 6),
	ARM64_SYS_REG(3, 0, 0, 6, 7),
	ARM64_SYS_REG(3, 0, 0, 7, 0),	/* ID_AA64MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 1),	/* ID_AA64MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 2),	/* ID_AA64MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 3),	/* ID_AA64MMFR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 4),	/* ID_AA64MMFR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 5),
	ARM64_SYS_REG(3, 0, 0, 7, 6),
	ARM64_SYS_REG(3, 0, 0, 7, 7),
	ARM64_SYS_REG(3, 0, 1, 0, 0),	/* SCTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 1),	/* ACTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 2),	/* CPACR_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 0),	/* TTBR0_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 1),	/* TTBR1_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 2),	/* TCR_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 3),	/* TCR2_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 0),	/* AFSR0_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 1),	/* AFSR1_EL1 */
	ARM64_SYS_REG(3, 0, 5, 2, 0),	/* ESR_EL1 */
	ARM64_SYS_REG(3, 0, 6, 0, 0),	/* FAR_EL1 */
	ARM64_SYS_REG(3, 0, 7, 4, 0),	/* PAR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 0),	/* MAIR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 2),	/* PIRE0_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 3),	/* PIR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 3, 0),	/* AMAIR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 0, 0),	/* VBAR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 1, 1),	/* DISR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 1),	/* CONTEXTIDR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 4),	/* TPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 14, 1, 0),	/* CNTKCTL_EL1 */
	ARM64_SYS_REG(3, 2, 0, 0, 0),	/* CSSELR_EL1 */
	ARM64_SYS_REG(3, 3, 13, 0, 2),	/* TPIDR_EL0 */
	ARM64_SYS_REG(3, 3, 13, 0, 3),	/* TPIDRRO_EL0 */
	ARM64_SYS_REG(3, 3, 14, 0, 1),	/* CNTPCT_EL0 */
	ARM64_SYS_REG(3, 3, 14, 2, 1),	/* CNTP_CTL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 2, 2),	/* CNTP_CVAL_EL0 */
	ARM64_SYS_REG(3, 4, 3, 0, 0),	/* DACR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 0, 1),	/* IFSR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 3, 0),	/* FPEXC32_EL2 */
};

static __u64 pmu_regs[] = {
	ARM64_SYS_REG(3, 0, 9, 14, 1),	/* PMINTENSET_EL1 */
	ARM64_SYS_REG(3, 0, 9, 14, 2),	/* PMINTENCLR_EL1 */
	ARM64_SYS_REG(3, 3, 9, 12, 0),	/* PMCR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 1),	/* PMCNTENSET_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 2),	/* PMCNTENCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 3),	/* PMOVSCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 4),	/* PMSWINC_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 5),	/* PMSELR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 13, 0),	/* PMCCNTR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 0),	/* PMUSERENR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 3),	/* PMOVSSET_EL0 */
	ARM64_SYS_REG(3, 3, 14, 8, 0),
	ARM64_SYS_REG(3, 3, 14, 8, 1),
	ARM64_SYS_REG(3, 3, 14, 8, 2),
	ARM64_SYS_REG(3, 3, 14, 8, 3),
	ARM64_SYS_REG(3, 3, 14, 8, 4),
	ARM64_SYS_REG(3, 3, 14, 8, 5),
	ARM64_SYS_REG(3, 3, 14, 8, 6),
	ARM64_SYS_REG(3, 3, 14, 8, 7),
	ARM64_SYS_REG(3, 3, 14, 9, 0),
	ARM64_SYS_REG(3, 3, 14, 9, 1),
	ARM64_SYS_REG(3, 3, 14, 9, 2),
	ARM64_SYS_REG(3, 3, 14, 9, 3),
	ARM64_SYS_REG(3, 3, 14, 9, 4),
	ARM64_SYS_REG(3, 3, 14, 9, 5),
	ARM64_SYS_REG(3, 3, 14, 9, 6),
	ARM64_SYS_REG(3, 3, 14, 9, 7),
	ARM64_SYS_REG(3, 3, 14, 10, 0),
	ARM64_SYS_REG(3, 3, 14, 10, 1),
	ARM64_SYS_REG(3, 3, 14, 10, 2),
	ARM64_SYS_REG(3, 3, 14, 10, 3),
	ARM64_SYS_REG(3, 3, 14, 10, 4),
	ARM64_SYS_REG(3, 3, 14, 10, 5),
	ARM64_SYS_REG(3, 3, 14, 10, 6),
	ARM64_SYS_REG(3, 3, 14, 10, 7),
	ARM64_SYS_REG(3, 3, 14, 11, 0),
	ARM64_SYS_REG(3, 3, 14, 11, 1),
	ARM64_SYS_REG(3, 3, 14, 11, 2),
	ARM64_SYS_REG(3, 3, 14, 11, 3),
	ARM64_SYS_REG(3, 3, 14, 11, 4),
	ARM64_SYS_REG(3, 3, 14, 11, 5),
	ARM64_SYS_REG(3, 3, 14, 11, 6),
	ARM64_SYS_REG(3, 3, 14, 12, 0),
	ARM64_SYS_REG(3, 3, 14, 12, 1),
	ARM64_SYS_REG(3, 3, 14, 12, 2),
	ARM64_SYS_REG(3, 3, 14, 12, 3),
	ARM64_SYS_REG(3, 3, 14, 12, 4),
	ARM64_SYS_REG(3, 3, 14, 12, 5),
	ARM64_SYS_REG(3, 3, 14, 12, 6),
	ARM64_SYS_REG(3, 3, 14, 12, 7),
	ARM64_SYS_REG(3, 3, 14, 13, 0),
	ARM64_SYS_REG(3, 3, 14, 13, 1),
	ARM64_SYS_REG(3, 3, 14, 13, 2),
	ARM64_SYS_REG(3, 3, 14, 13, 3),
	ARM64_SYS_REG(3, 3, 14, 13, 4),
	ARM64_SYS_REG(3, 3, 14, 13, 5),
	ARM64_SYS_REG(3, 3, 14, 13, 6),
	ARM64_SYS_REG(3, 3, 14, 13, 7),
	ARM64_SYS_REG(3, 3, 14, 14, 0),
	ARM64_SYS_REG(3, 3, 14, 14, 1),
	ARM64_SYS_REG(3, 3, 14, 14, 2),
	ARM64_SYS_REG(3, 3, 14, 14, 3),
	ARM64_SYS_REG(3, 3, 14, 14, 4),
	ARM64_SYS_REG(3, 3, 14, 14, 5),
	ARM64_SYS_REG(3, 3, 14, 14, 6),
	ARM64_SYS_REG(3, 3, 14, 14, 7),
	ARM64_SYS_REG(3, 3, 14, 15, 0),
	ARM64_SYS_REG(3, 3, 14, 15, 1),
	ARM64_SYS_REG(3, 3, 14, 15, 2),
	ARM64_SYS_REG(3, 3, 14, 15, 3),
	ARM64_SYS_REG(3, 3, 14, 15, 4),
	ARM64_SYS_REG(3, 3, 14, 15, 5),
	ARM64_SYS_REG(3, 3, 14, 15, 6),
	ARM64_SYS_REG(3, 3, 14, 15, 7),	/* PMCCFILTR_EL0 */
};

static __u64 vregs[] = {
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
};

static __u64 sve_regs[] = {
	KVM_REG_ARM64_SVE_VLS,
	KVM_REG_ARM64_SVE_ZREG(0, 0),
	KVM_REG_ARM64_SVE_ZREG(1, 0),
	KVM_REG_ARM64_SVE_ZREG(2, 0),
	KVM_REG_ARM64_SVE_ZREG(3, 0),
	KVM_REG_ARM64_SVE_ZREG(4, 0),
	KVM_REG_ARM64_SVE_ZREG(5, 0),
	KVM_REG_ARM64_SVE_ZREG(6, 0),
	KVM_REG_ARM64_SVE_ZREG(7, 0),
	KVM_REG_ARM64_SVE_ZREG(8, 0),
	KVM_REG_ARM64_SVE_ZREG(9, 0),
	KVM_REG_ARM64_SVE_ZREG(10, 0),
	KVM_REG_ARM64_SVE_ZREG(11, 0),
	KVM_REG_ARM64_SVE_ZREG(12, 0),
	KVM_REG_ARM64_SVE_ZREG(13, 0),
	KVM_REG_ARM64_SVE_ZREG(14, 0),
	KVM_REG_ARM64_SVE_ZREG(15, 0),
	KVM_REG_ARM64_SVE_ZREG(16, 0),
	KVM_REG_ARM64_SVE_ZREG(17, 0),
	KVM_REG_ARM64_SVE_ZREG(18, 0),
	KVM_REG_ARM64_SVE_ZREG(19, 0),
	KVM_REG_ARM64_SVE_ZREG(20, 0),
	KVM_REG_ARM64_SVE_ZREG(21, 0),
	KVM_REG_ARM64_SVE_ZREG(22, 0),
	KVM_REG_ARM64_SVE_ZREG(23, 0),
	KVM_REG_ARM64_SVE_ZREG(24, 0),
	KVM_REG_ARM64_SVE_ZREG(25, 0),
	KVM_REG_ARM64_SVE_ZREG(26, 0),
	KVM_REG_ARM64_SVE_ZREG(27, 0),
	KVM_REG_ARM64_SVE_ZREG(28, 0),
	KVM_REG_ARM64_SVE_ZREG(29, 0),
	KVM_REG_ARM64_SVE_ZREG(30, 0),
	KVM_REG_ARM64_SVE_ZREG(31, 0),
	KVM_REG_ARM64_SVE_PREG(0, 0),
	KVM_REG_ARM64_SVE_PREG(1, 0),
	KVM_REG_ARM64_SVE_PREG(2, 0),
	KVM_REG_ARM64_SVE_PREG(3, 0),
	KVM_REG_ARM64_SVE_PREG(4, 0),
	KVM_REG_ARM64_SVE_PREG(5, 0),
	KVM_REG_ARM64_SVE_PREG(6, 0),
	KVM_REG_ARM64_SVE_PREG(7, 0),
	KVM_REG_ARM64_SVE_PREG(8, 0),
	KVM_REG_ARM64_SVE_PREG(9, 0),
	KVM_REG_ARM64_SVE_PREG(10, 0),
	KVM_REG_ARM64_SVE_PREG(11, 0),
	KVM_REG_ARM64_SVE_PREG(12, 0),
	KVM_REG_ARM64_SVE_PREG(13, 0),
	KVM_REG_ARM64_SVE_PREG(14, 0),
	KVM_REG_ARM64_SVE_PREG(15, 0),
	KVM_REG_ARM64_SVE_FFR(0),
	ARM64_SYS_REG(3, 0, 1, 2, 0),   /* ZCR_EL1 */
};

static __u64 sve_rejects_set[] = {
	KVM_REG_ARM64_SVE_VLS,
};

static __u64 pauth_addr_regs[] = {
	ARM64_SYS_REG(3, 0, 2, 1, 0),	/* APIAKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 1, 1),	/* APIAKEYHI_EL1 */
	ARM64_SYS_REG(3, 0, 2, 1, 2),	/* APIBKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 1, 3),	/* APIBKEYHI_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 0),	/* APDAKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 1),	/* APDAKEYHI_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 2),	/* APDBKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 3)	/* APDBKEYHI_EL1 */
};

static __u64 pauth_generic_regs[] = {
	ARM64_SYS_REG(3, 0, 2, 3, 0),	/* APGAKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 3, 1),	/* APGAKEYHI_EL1 */
};

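/*
 * Each vcpu config below is assembled from the sublists above; its blessed
 * list is simply the concatenation of the sublists' regs arrays. A new
 * optional feature would typically get its own sublist (capability, feature
 * bit, register array) plus one or more new configs referencing it.
 */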
#define BASE_SUBLIST \
	{ "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \
	{ "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
#define PMU_SUBLIST \
	{ "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
	  .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
#define SVE_SUBLIST \
	{ "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
	  .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
	  .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
#define PAUTH_SUBLIST							\
	{								\
		.name		= "pauth_address",			\
		.capability	= KVM_CAP_ARM_PTRAUTH_ADDRESS,		\
		.feature	= KVM_ARM_VCPU_PTRAUTH_ADDRESS,		\
		.regs		= pauth_addr_regs,			\
		.regs_n		= ARRAY_SIZE(pauth_addr_regs),		\
	},								\
	{								\
		.name		= "pauth_generic",			\
		.capability	= KVM_CAP_ARM_PTRAUTH_GENERIC,		\
		.feature	= KVM_ARM_VCPU_PTRAUTH_GENERIC,		\
		.regs		= pauth_generic_regs,			\
		.regs_n		= ARRAY_SIZE(pauth_generic_regs),	\
	}

static struct vcpu_config vregs_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	{0},
	},
};
static struct vcpu_config vregs_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};
static struct vcpu_config sve_config = {
	.sublists = {
	BASE_SUBLIST,
	SVE_SUBLIST,
	{0},
	},
};
static struct vcpu_config sve_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	SVE_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};
static struct vcpu_config pauth_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	{0},
	},
};
static struct vcpu_config pauth_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

static struct vcpu_config *vcpu_configs[] = {
	&vregs_config,
	&vregs_pmu_config,
	&sve_config,
	&sve_pmu_config,
	&pauth_config,
	&pauth_pmu_config,
};
static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);