xref: /openbmc/linux/arch/arm64/kvm/sys_regs.c (revision 6aeadf78)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"
/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64-bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}
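
/*
 * Worked example: with CTR_EL0.DminLine == 4, the smallest D-cache line
 * is 2^4 words == 64 bytes, and get_min_cache_line_size() returns
 * 4 + 2 == 6 == Log2(64).
 */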

/* Which cache CCSIDR represents depends on the CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary with the
	 * physical CPU on which the vcpu currently resides.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configurations.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) is 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if it doesn't.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}
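
/*
 * Worked example: a 64-byte line gives line_size == 6 above, so the
 * fabricated CCSIDR_EL1 value has LineSize == 2, the architected
 * encoding of Log2(words) - 2. All other fields are zero: 1 set, 1 way.
 */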

static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}

static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}

static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
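
/*
 * Worked example: for an AA32_HI-mapped register, get_access_mask()
 * yields mask == GENMASK_ULL(63, 32) and shift == 32, so the merge in
 * access_vm_reg() keeps bits [31:0] of the 64-bit register and places
 * the guest's 32-bit value in bits [63:32].
 */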

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
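
/*
 * Example: an AArch64 write to ICC_ASGI1R_EL1 (Op2 == 6) is dispatched
 * with g1 == false, i.e. as a Group0 SGI, matching the GICD_CTLR.DS=1
 * behaviour described above.
 */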

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a read-only
 * register, we treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
	if (p->regval & OSLAR_EL1_OSLK)
		oslsr |= OSLSR_EL1_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);
	if (p->is_write)
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32-bit write to a debug register leaves the top bits alone, and
 * a 32-bit read from a debug register returns only the bottom bits.
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu_set_flag(vcpu, DEBUG_DIRTY);
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 to 16
	 * because the ICC_SGIxR registers of the GICv3 can only address
	 * 16 CPUs at that level when sending IPIs directly to each CPU.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
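
/*
 * Worked example: vcpu_id 0x1234 yields Aff0 == 0x4, Aff1 == 0x23 and
 * Aff2 == 0x1 in the resulting MPIDR, with bit 31 (RES1) always set.
 */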

static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

	/* No PMU available, any PMU reg may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
	n &= ARMV8_PMU_PMCR_N_MASK;
	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;
}

static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
}

static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
}

static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
}

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
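
/*
 * Example: with PMCR_EL0.N == 4, event counter indices 0-3 and
 * ARMV8_PMU_CYCLE_IDX (the cycle counter) are valid; any other index
 * UNDEFs.
 */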

static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}
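
/*
 * Worked example: PMEVCNTR19_EL0 is encoded with CRm == 0b1010 and
 * Op2 == 0b011, so the decode above gives idx == ((0b10 << 3) | 0b011)
 * == 19.
 */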

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0,  get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }

#define PMU_SYS_REG(r)						\
	SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
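
/*
 * For instance, PMU_PMEVCNTR_EL0(2) expands to a descriptor for
 * PMEVCNTR2_EL0 backed by the shadow register PMEVCNTR0_EL0 + 2, with
 * reset_pmevcntr overriding the reset handler inherited from
 * PMU_SYS_REG().
 */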

static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
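
/*
 * For instance, PTRAUTH_KEY(APIA) expands to the descriptors for
 * APIAKEYLO_EL1 and APIAKEYHI_EL1; both use undef_access, which only
 * ever runs when the guest lacks PtrAuth, as explained above.
 */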

static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return vcpu->kvm->arch.dfr0_pmuver.imp;

	return vcpu->kvm->arch.dfr0_pmuver.unimp;
}

static u8 perfmon_to_pmuver(u8 perfmon)
{
	switch (perfmon) {
	case ID_DFR0_EL1_PerfMon_PMUv3:
		return ID_AA64DFR0_EL1_PMUVer_IMP;
	case ID_DFR0_EL1_PerfMon_IMPDEF:
		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return perfmon;
	}
}

static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}
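
/*
 * Example: PMUv3 for Armv8.1 is 0b0100 in both the PMUVer and PerfMon
 * fields, so it is handled by the pass-through default case in both
 * conversion directions.
 */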

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
		if (kvm_vgic_global_state.type == VGIC_V3) {
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
		}
		break;
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS);
		break;
	case SYS_ID_AA64DFR0_EL1:
		/* Limit debug to ARMv8.0 */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
		/* Set PMUver to the required version */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
				  vcpu_pmuver(vcpu));
		/* Hide SPE from guests */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
		break;
	case SYS_ID_DFR0_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
				  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	return val;
}

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return id_visibility(vcpu, r);
}

static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}

/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);
	if (vcpu_has_nv(vcpu))
		access_nested_id_reg(vcpu, p, r);

	return true;
}

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 csv2, csv3;

	/*
	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
	 * it doesn't promise more than what is actually provided (the
	 * guest could otherwise be covered in ectoplasmic residue).
	 */
	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
	if (csv2 > 1 ||
	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* Same thing for CSV3 */
	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
	if (csv3 > 1 ||
	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* We can only differ with CSV[23], and anything else is an error */
	val ^= read_id_reg(vcpu, rd);
	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
	if (val)
		return -EINVAL;

	vcpu->kvm->arch.pfr0_csv2 = csv2;
	vcpu->kvm->arch.pfr0_csv3 = csv3;

	return 0;
}

static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 pmuver, host_pmuver;
	bool valid_pmu;

	host_pmuver = kvm_arm_pmu_get_pmuver_limit();

	/*
	 * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
	 * as it doesn't promise more than what the HW gives us. We
	 * allow an IMPDEF PMU though, only if no PMU is supported
	 * (KVM backward compatibility handling).
	 */
	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
	if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
		return -EINVAL;

	valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);

	/* Make sure the register view and the PMU support match */
	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
		return -EINVAL;

	/* We can only differ with PMUver, and anything else is an error */
	val ^= read_id_reg(vcpu, rd);
	val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
	if (val)
		return -EINVAL;

	if (valid_pmu)
		vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
	else
		vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;

	return 0;
}

static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon, host_perfmon;
	bool valid_pmu;

	host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val);
	if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) ||
	    (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
		return -EINVAL;

	valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF);

	/* Make sure the register view and the PMU support match */
	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
		return -EINVAL;

	/* We can only differ with PerfMon, and anything else is an error */
	val ^= read_id_reg(vcpu, rd);
	val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
	if (val)
		return -EINVAL;

	if (valid_pmu)
		vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
	else
		vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);

	return 0;
}

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 *val)
{
	*val = read_id_reg(vcpu, rd);
	return 0;
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(vcpu, rd))
		return -EINVAL;

	return 0;
}

static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       u64 *val)
{
	*val = 0;
	return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	return 0;
}

static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

/*
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 * with the physical CPU on which the vcpu currently resides.
 */
static void reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 clidr;
	u8 loc;

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * Data cache clean to the PoU is not required so LoUU and LoUIS
		 * will not be set and a unified cache, which will be marked as
		 * LoC, will be added.
		 *
		 * If not DIC, make the unified cache L2 so that an instruction
		 * cache can be added as L1 later.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/*
		 * Data cache clean to the PoU is required so let L1 have a data
		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
		 * it can be marked as LoC too.
		 */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/*
	 * Instruction cache invalidation to the PoU is required so let L1 have
	 * an instruction cache. If L1 already has a data cache, it will be
	 * CACHE_TYPE_SEPARATE.
	 */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/*
	 * Add a tag cache unified with the data cache. Allocation tags and
	 * data are unified in a cache line, so it looks valid even if there
	 * is only one cache line.
	 */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_sys_reg(vcpu, r->reg) = clidr;
}
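
/*
 * Worked example: on a host with neither CTR_EL0.IDC nor CTR_EL0.DIC,
 * the fabricated CLIDR_EL1 has LoUU == LoUIS == LoC == 1 and describes
 * L1 as separate instruction and data caches.
 */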

static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;

	return 0;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
	if (csselr < CSSELR_MAX)
		p->regval = get_ccsidr(vcpu, csselr);

	return true;
}

static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}

static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_nv(vcpu))
		return 0;

	return REG_HIDDEN;
}

#define EL2_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name),			\
	.access = acc,				\
	.reset = rst,				\
	.reg = name,				\
	.visibility = el2_visibility,		\
	.val = v,				\
}

/*
 * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
 * HCR_EL2.E2H==1, and are present in the sysreg table only for the
 * convenience of handling traps. Given that, they are always hidden
 * from userspace.
 */
static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
				    const struct sys_reg_desc *rd)
{
	return REG_HIDDEN_USER;
}

#define EL12_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name##_EL12),		\
	.access = acc,				\
	.reset = rst,				\
	.reg = name##_EL1,			\
	.val = v,				\
	.visibility = elx2_visibility,		\
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
}

/* sys_reg_desc initialiser for known AArch32 cpufeature ID registers */
#define AA32_ID_SANITISED(name) {		\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = aa32_id_visibility,	\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_id_reg,			\
	.get_user = get_id_reg,				\
	.set_user = set_id_reg,				\
	.visibility = raz_visibility			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility,		\
}

static bool access_sp_el1(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);

	return true;
}

static bool access_elr(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
	else
		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);

	return true;
}

static bool access_spsr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);

	return true;
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all, debug-related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
		OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AA32_ID_SANITISED(ID_PFR0_EL1),
	AA32_ID_SANITISED(ID_PFR1_EL1),
	{ SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_dfr0_el1,
	  .visibility = aa32_id_visibility, },
	ID_HIDDEN(ID_AFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR2_EL1),
	AA32_ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	AA32_ID_SANITISED(ID_ISAR0_EL1),
	AA32_ID_SANITISED(ID_ISAR1_EL1),
	AA32_ID_SANITISED(ID_ISAR2_EL1),
	AA32_ID_SANITISED(ID_ISAR3_EL1),
	AA32_ID_SANITISED(ID_ISAR4_EL1),
	AA32_ID_SANITISED(ID_ISAR5_EL1),
	AA32_ID_SANITISED(ID_MMFR4_EL1),
	AA32_ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	AA32_ID_SANITISED(MVFR0_EL1),
	AA32_ID_SANITISED(MVFR1_EL1),
	AA32_ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AA32_ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_SANITISED(ID_AA64ZFR0_EL1),
	ID_HIDDEN(ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	{ SYS_DESC(SYS_ID_AA64DFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, },
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_SANITISED(ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_SANITISED(ID_AA64MMFR3_EL1),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
	{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_SPSR_EL1), access_spsr },
	{ SYS_DESC(SYS_ELR_EL1), access_elr },

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */

	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
1962 	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
1963 
1964 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1965 	{ SYS_DESC(SYS_PIRE0_EL1), access_vm_reg, reset_unknown, PIRE0_EL1 },
1966 	{ SYS_DESC(SYS_PIR_EL1), access_vm_reg, reset_unknown, PIR_EL1 },
1967 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1968 
1969 	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
1970 	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
1971 	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
1972 	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
1973 	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
1974 
1975 	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
1976 	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
1977 
1978 	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
1979 	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
1980 	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
1981 	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
1982 	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
1983 	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
1984 	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
1985 	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
1986 	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
1987 	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
1988 	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
1989 	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
1990 
1991 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
1992 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
1993 
1994 	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
1995 
1996 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
1997 
1998 	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
1999 	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
2000 	  .set_user = set_clidr },
2001 	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
2002 	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
2003 	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
2004 	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
2005 	{ SYS_DESC(SYS_SVCR), undef_access },
2006 
2007 	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
2008 	  .reset = reset_pmcr, .reg = PMCR_EL0 },
2009 	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
2010 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
2011 	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
2012 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
2013 	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
2014 	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
2015 	/*
2016 	 * PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
2017 	 * (pointlessly) advertised in the past...
2018 	 */
2019 	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
2020 	  .get_user = get_raz_reg, .set_user = set_wi_reg,
2021 	  .access = access_pmswinc, .reset = NULL },
2022 	{ PMU_SYS_REG(SYS_PMSELR_EL0),
2023 	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
2024 	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
2025 	  .access = access_pmceid, .reset = NULL },
2026 	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
2027 	  .access = access_pmceid, .reset = NULL },
2028 	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
2029 	  .access = access_pmu_evcntr, .reset = reset_unknown,
2030 	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
2031 	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
2032 	  .access = access_pmu_evtyper, .reset = NULL },
2033 	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
2034 	  .access = access_pmu_evcntr, .reset = NULL },
2035 	/*
2036 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
2037 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
2038 	 */
2039 	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
2040 	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
2041 	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
2042 	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
2043 
2044 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
2045 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
2046 	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },
2047 
2048 	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
2049 
2050 	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
2051 	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
2052 	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
2053 	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
2054 	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
2055 	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
2056 	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
2057 	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
2058 	AMU_AMEVCNTR0_EL0(0),
2059 	AMU_AMEVCNTR0_EL0(1),
2060 	AMU_AMEVCNTR0_EL0(2),
2061 	AMU_AMEVCNTR0_EL0(3),
2062 	AMU_AMEVCNTR0_EL0(4),
2063 	AMU_AMEVCNTR0_EL0(5),
2064 	AMU_AMEVCNTR0_EL0(6),
2065 	AMU_AMEVCNTR0_EL0(7),
2066 	AMU_AMEVCNTR0_EL0(8),
2067 	AMU_AMEVCNTR0_EL0(9),
2068 	AMU_AMEVCNTR0_EL0(10),
2069 	AMU_AMEVCNTR0_EL0(11),
2070 	AMU_AMEVCNTR0_EL0(12),
2071 	AMU_AMEVCNTR0_EL0(13),
2072 	AMU_AMEVCNTR0_EL0(14),
2073 	AMU_AMEVCNTR0_EL0(15),
2074 	AMU_AMEVTYPER0_EL0(0),
2075 	AMU_AMEVTYPER0_EL0(1),
2076 	AMU_AMEVTYPER0_EL0(2),
2077 	AMU_AMEVTYPER0_EL0(3),
2078 	AMU_AMEVTYPER0_EL0(4),
2079 	AMU_AMEVTYPER0_EL0(5),
2080 	AMU_AMEVTYPER0_EL0(6),
2081 	AMU_AMEVTYPER0_EL0(7),
2082 	AMU_AMEVTYPER0_EL0(8),
2083 	AMU_AMEVTYPER0_EL0(9),
2084 	AMU_AMEVTYPER0_EL0(10),
2085 	AMU_AMEVTYPER0_EL0(11),
2086 	AMU_AMEVTYPER0_EL0(12),
2087 	AMU_AMEVTYPER0_EL0(13),
2088 	AMU_AMEVTYPER0_EL0(14),
2089 	AMU_AMEVTYPER0_EL0(15),
2090 	AMU_AMEVCNTR1_EL0(0),
2091 	AMU_AMEVCNTR1_EL0(1),
2092 	AMU_AMEVCNTR1_EL0(2),
2093 	AMU_AMEVCNTR1_EL0(3),
2094 	AMU_AMEVCNTR1_EL0(4),
2095 	AMU_AMEVCNTR1_EL0(5),
2096 	AMU_AMEVCNTR1_EL0(6),
2097 	AMU_AMEVCNTR1_EL0(7),
2098 	AMU_AMEVCNTR1_EL0(8),
2099 	AMU_AMEVCNTR1_EL0(9),
2100 	AMU_AMEVCNTR1_EL0(10),
2101 	AMU_AMEVCNTR1_EL0(11),
2102 	AMU_AMEVCNTR1_EL0(12),
2103 	AMU_AMEVCNTR1_EL0(13),
2104 	AMU_AMEVCNTR1_EL0(14),
2105 	AMU_AMEVCNTR1_EL0(15),
2106 	AMU_AMEVTYPER1_EL0(0),
2107 	AMU_AMEVTYPER1_EL0(1),
2108 	AMU_AMEVTYPER1_EL0(2),
2109 	AMU_AMEVTYPER1_EL0(3),
2110 	AMU_AMEVTYPER1_EL0(4),
2111 	AMU_AMEVTYPER1_EL0(5),
2112 	AMU_AMEVTYPER1_EL0(6),
2113 	AMU_AMEVTYPER1_EL0(7),
2114 	AMU_AMEVTYPER1_EL0(8),
2115 	AMU_AMEVTYPER1_EL0(9),
2116 	AMU_AMEVTYPER1_EL0(10),
2117 	AMU_AMEVTYPER1_EL0(11),
2118 	AMU_AMEVTYPER1_EL0(12),
2119 	AMU_AMEVTYPER1_EL0(13),
2120 	AMU_AMEVTYPER1_EL0(14),
2121 	AMU_AMEVTYPER1_EL0(15),
2122 
2123 	{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
2124 	{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
2125 	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
2126 	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
2127 	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
2128 
2129 	/* PMEVCNTRn_EL0 */
2130 	PMU_PMEVCNTR_EL0(0),
2131 	PMU_PMEVCNTR_EL0(1),
2132 	PMU_PMEVCNTR_EL0(2),
2133 	PMU_PMEVCNTR_EL0(3),
2134 	PMU_PMEVCNTR_EL0(4),
2135 	PMU_PMEVCNTR_EL0(5),
2136 	PMU_PMEVCNTR_EL0(6),
2137 	PMU_PMEVCNTR_EL0(7),
2138 	PMU_PMEVCNTR_EL0(8),
2139 	PMU_PMEVCNTR_EL0(9),
2140 	PMU_PMEVCNTR_EL0(10),
2141 	PMU_PMEVCNTR_EL0(11),
2142 	PMU_PMEVCNTR_EL0(12),
2143 	PMU_PMEVCNTR_EL0(13),
2144 	PMU_PMEVCNTR_EL0(14),
2145 	PMU_PMEVCNTR_EL0(15),
2146 	PMU_PMEVCNTR_EL0(16),
2147 	PMU_PMEVCNTR_EL0(17),
2148 	PMU_PMEVCNTR_EL0(18),
2149 	PMU_PMEVCNTR_EL0(19),
2150 	PMU_PMEVCNTR_EL0(20),
2151 	PMU_PMEVCNTR_EL0(21),
2152 	PMU_PMEVCNTR_EL0(22),
2153 	PMU_PMEVCNTR_EL0(23),
2154 	PMU_PMEVCNTR_EL0(24),
2155 	PMU_PMEVCNTR_EL0(25),
2156 	PMU_PMEVCNTR_EL0(26),
2157 	PMU_PMEVCNTR_EL0(27),
2158 	PMU_PMEVCNTR_EL0(28),
2159 	PMU_PMEVCNTR_EL0(29),
2160 	PMU_PMEVCNTR_EL0(30),
2161 	/* PMEVTYPERn_EL0 */
2162 	PMU_PMEVTYPER_EL0(0),
2163 	PMU_PMEVTYPER_EL0(1),
2164 	PMU_PMEVTYPER_EL0(2),
2165 	PMU_PMEVTYPER_EL0(3),
2166 	PMU_PMEVTYPER_EL0(4),
2167 	PMU_PMEVTYPER_EL0(5),
2168 	PMU_PMEVTYPER_EL0(6),
2169 	PMU_PMEVTYPER_EL0(7),
2170 	PMU_PMEVTYPER_EL0(8),
2171 	PMU_PMEVTYPER_EL0(9),
2172 	PMU_PMEVTYPER_EL0(10),
2173 	PMU_PMEVTYPER_EL0(11),
2174 	PMU_PMEVTYPER_EL0(12),
2175 	PMU_PMEVTYPER_EL0(13),
2176 	PMU_PMEVTYPER_EL0(14),
2177 	PMU_PMEVTYPER_EL0(15),
2178 	PMU_PMEVTYPER_EL0(16),
2179 	PMU_PMEVTYPER_EL0(17),
2180 	PMU_PMEVTYPER_EL0(18),
2181 	PMU_PMEVTYPER_EL0(19),
2182 	PMU_PMEVTYPER_EL0(20),
2183 	PMU_PMEVTYPER_EL0(21),
2184 	PMU_PMEVTYPER_EL0(22),
2185 	PMU_PMEVTYPER_EL0(23),
2186 	PMU_PMEVTYPER_EL0(24),
2187 	PMU_PMEVTYPER_EL0(25),
2188 	PMU_PMEVTYPER_EL0(26),
2189 	PMU_PMEVTYPER_EL0(27),
2190 	PMU_PMEVTYPER_EL0(28),
2191 	PMU_PMEVTYPER_EL0(29),
2192 	PMU_PMEVTYPER_EL0(30),
2193 	/*
2194 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
2195 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
2196 	 */
2197 	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
2198 	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
2199 
2200 	EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
2201 	EL2_REG(VMPIDR_EL2, access_rw, reset_unknown, 0),
2202 	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
2203 	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
2204 	EL2_REG(HCR_EL2, access_rw, reset_val, 0),
2205 	EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
2206 	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_EL2_DEFAULT),
2207 	EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
2208 	EL2_REG(HACR_EL2, access_rw, reset_val, 0),
2209 
2210 	EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
2211 	EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
2212 	EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
2213 	EL2_REG(VTTBR_EL2, access_rw, reset_val, 0),
2214 	EL2_REG(VTCR_EL2, access_rw, reset_val, 0),
2215 
2216 	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
2217 	EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
2218 	EL2_REG(ELR_EL2, access_rw, reset_val, 0),
2219 	{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
2220 
2221 	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
2222 	EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
2223 	EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
2224 	EL2_REG(ESR_EL2, access_rw, reset_val, 0),
2225 	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
2226 
2227 	EL2_REG(FAR_EL2, access_rw, reset_val, 0),
2228 	EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
2229 
2230 	EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
2231 	EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
2232 
2233 	EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
2234 	EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
2235 	{ SYS_DESC(SYS_RMR_EL2), trap_undef },
2236 
2237 	EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
2238 	EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
2239 
2240 	EL2_REG(CNTVOFF_EL2, access_rw, reset_val, 0),
2241 	EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
2242 
2243 	EL12_REG(SCTLR, access_vm_reg, reset_val, 0x00C50078),
2244 	EL12_REG(CPACR, access_rw, reset_val, 0),
2245 	EL12_REG(TTBR0, access_vm_reg, reset_unknown, 0),
2246 	EL12_REG(TTBR1, access_vm_reg, reset_unknown, 0),
2247 	EL12_REG(TCR, access_vm_reg, reset_val, 0),
2248 	{ SYS_DESC(SYS_SPSR_EL12), access_spsr},
2249 	{ SYS_DESC(SYS_ELR_EL12), access_elr},
2250 	EL12_REG(AFSR0, access_vm_reg, reset_unknown, 0),
2251 	EL12_REG(AFSR1, access_vm_reg, reset_unknown, 0),
2252 	EL12_REG(ESR, access_vm_reg, reset_unknown, 0),
2253 	EL12_REG(FAR, access_vm_reg, reset_unknown, 0),
2254 	EL12_REG(MAIR, access_vm_reg, reset_unknown, 0),
2255 	EL12_REG(AMAIR, access_vm_reg, reset_amair_el1, 0),
2256 	EL12_REG(VBAR, access_rw, reset_val, 0),
2257 	EL12_REG(CONTEXTIDR, access_vm_reg, reset_val, 0),
2258 	EL12_REG(CNTKCTL, access_rw, reset_val, 0),
2259 
2260 	EL2_REG(SP_EL2, NULL, reset_unknown, 0),
2261 };
2262 
2263 static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
2264 			struct sys_reg_params *p,
2265 			const struct sys_reg_desc *r)
2266 {
2267 	if (p->is_write) {
2268 		return ignore_write(vcpu, p);
2269 	} else {
2270 		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
2271 		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2272 		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
2273 
2274 		p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
2275 			     (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
2276 			     (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20) |
2277 			     (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
2278 		return true;
2279 	}
2280 }
2281 
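/*
 * The DBGDIDR value fabricated above packs the sanitised 64bit debug
 * features into the AArch32 layout: WRPs into bits [31:28], BRPs into
 * [27:24], CTX_CMPs into [23:20], a debug architecture version of 6
 * (Armv8) into [19:16], bit [15] set, and EL3-dependent bits at [14]
 * and [12].
 */
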
2282 /*
2283  * AArch32 debug register mappings
2284  *
2285  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
2286  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
2287  *
2288  * None of the other registers share their location, so treat them as
2289  * if they were 64bit.
2290  */
2291 #define DBG_BCR_BVR_WCR_WVR(n)						      \
2292 	/* DBGBVRn */							      \
2293 	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
2294 	/* DBGBCRn */							      \
2295 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	      \
2296 	/* DBGWVRn */							      \
2297 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	      \
2298 	/* DBGWCRn */							      \
2299 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
2300 
2301 #define DBGBXVR(n)							      \
2302 	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
2303 
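/*
 * A minimal sketch of what the LO/HI mapping above implies for a 32bit
 * write. The helper below is hypothetical and not built; it only
 * illustrates how a DBGBVRn/DBGBXVRn access lands in the 64bit
 * DBGBVRn_EL1 value.
 */
#if 0
static u64 aa32_fold_dbgbvr(u64 old, u32 val, bool is_bxvr)
{
	if (is_bxvr)	/* DBGBXVRn -> DBGBVRn_EL1[63:32] */
		return (old & GENMASK_ULL(31, 0)) | ((u64)val << 32);
	/* DBGBVRn -> DBGBVRn_EL1[31:0] */
	return (old & GENMASK_ULL(63, 32)) | val;
}
#endif
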
2304 /*
2305  * Trapped cp14 registers. We generally ignore most of the external
2306  * debug, on the principle that they don't really make sense to a
2307  * guest. Revisit this one day, should this principle change.
2308  */
2309 static const struct sys_reg_desc cp14_regs[] = {
2310 	/* DBGDIDR */
2311 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
2312 	/* DBGDTRRXext */
2313 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
2314 
2315 	DBG_BCR_BVR_WCR_WVR(0),
2316 	/* DBGDSCRint */
2317 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
2318 	DBG_BCR_BVR_WCR_WVR(1),
2319 	/* DBGDCCINT */
2320 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
2321 	/* DBGDSCRext */
2322 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
2323 	DBG_BCR_BVR_WCR_WVR(2),
2324 	/* DBGDTR[RT]Xint */
2325 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
2326 	/* DBGDTR[RT]Xext */
2327 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
2328 	DBG_BCR_BVR_WCR_WVR(3),
2329 	DBG_BCR_BVR_WCR_WVR(4),
2330 	DBG_BCR_BVR_WCR_WVR(5),
2331 	/* DBGWFAR */
2332 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
2333 	/* DBGOSECCR */
2334 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
2335 	DBG_BCR_BVR_WCR_WVR(6),
2336 	/* DBGVCR */
2337 	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
2338 	DBG_BCR_BVR_WCR_WVR(7),
2339 	DBG_BCR_BVR_WCR_WVR(8),
2340 	DBG_BCR_BVR_WCR_WVR(9),
2341 	DBG_BCR_BVR_WCR_WVR(10),
2342 	DBG_BCR_BVR_WCR_WVR(11),
2343 	DBG_BCR_BVR_WCR_WVR(12),
2344 	DBG_BCR_BVR_WCR_WVR(13),
2345 	DBG_BCR_BVR_WCR_WVR(14),
2346 	DBG_BCR_BVR_WCR_WVR(15),
2347 
2348 	/* DBGDRAR (32bit) */
2349 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
2350 
2351 	DBGBXVR(0),
2352 	/* DBGOSLAR */
2353 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
2354 	DBGBXVR(1),
2355 	/* DBGOSLSR */
2356 	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
2357 	DBGBXVR(2),
2358 	DBGBXVR(3),
2359 	/* DBGOSDLR */
2360 	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
2361 	DBGBXVR(4),
2362 	/* DBGPRCR */
2363 	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
2364 	DBGBXVR(5),
2365 	DBGBXVR(6),
2366 	DBGBXVR(7),
2367 	DBGBXVR(8),
2368 	DBGBXVR(9),
2369 	DBGBXVR(10),
2370 	DBGBXVR(11),
2371 	DBGBXVR(12),
2372 	DBGBXVR(13),
2373 	DBGBXVR(14),
2374 	DBGBXVR(15),
2375 
2376 	/* DBGDSAR (32bit) */
2377 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
2378 
2379 	/* DBGDEVID2 */
2380 	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
2381 	/* DBGDEVID1 */
2382 	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
2383 	/* DBGDEVID */
2384 	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
2385 	/* DBGCLAIMSET */
2386 	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
2387 	/* DBGCLAIMCLR */
2388 	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
2389 	/* DBGAUTHSTATUS */
2390 	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
2391 };
2392 
2393 /* Trapped cp14 64bit registers */
2394 static const struct sys_reg_desc cp14_64_regs[] = {
2395 	/* DBGDRAR (64bit) */
2396 	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
2397 
2398 	/* DBGDSAR (64bit) */
2399 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
2400 };
2401 
2402 #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
2403 	AA32(_map),							\
2404 	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
2405 	.visibility = pmu_visibility
2406 
2407 /* Macro to expand the PMEVCNTRn register */
2408 #define PMU_PMEVCNTR(n)							\
2409 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
2410 	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
2411 	  .access = access_pmu_evcntr }
2412 
2413 /* Macro to expand the PMEVTYPERn register */
2414 #define PMU_PMEVTYPER(n)						\
2415 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
2416 	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
2417 	  .access = access_pmu_evtyper }
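
/*
 * Worked example of the encoding above: for n = 13, PMU_PMEVTYPER yields
 * CRm = 0b1100 | ((13 >> 3) & 0x3) = 0b1101 and Op2 = 13 & 0x7 = 0b101,
 * i.e. PMEVTYPER13 decodes as { Op1=0, CRn=0b1110, CRm=0b1101, Op2=0b101 }.
 */
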
2418 /*
2419  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
2420  * depending on the way they are accessed (as a 32bit or a 64bit
2421  * register).
2422  */
2423 static const struct sys_reg_desc cp15_regs[] = {
2424 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
2425 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
2426 	/* ACTLR */
2427 	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
2428 	/* ACTLR2 */
2429 	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
2430 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
2431 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
2432 	/* TTBCR */
2433 	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
2434 	/* TTBCR2 */
2435 	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
2436 	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
2437 	/* DFSR */
2438 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
2439 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
2440 	/* ADFSR */
2441 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
2442 	/* AIFSR */
2443 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
2444 	/* DFAR */
2445 	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
2446 	/* IFAR */
2447 	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
2448 
2449 	/*
2450 	 * DC{C,I,CI}SW operations:
2451 	 */
2452 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
2453 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
2454 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
2455 
2456 	/* PMU */
2457 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
2458 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
2459 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
2460 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
2461 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
2462 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
2463 	{ CP15_PMU_SYS_REG(LO,     0, 9, 12, 6), .access = access_pmceid },
2464 	{ CP15_PMU_SYS_REG(LO,     0, 9, 12, 7), .access = access_pmceid },
2465 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
2466 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
2467 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
2468 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
2469 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
2470 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
2471 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
2472 	{ CP15_PMU_SYS_REG(HI,     0, 9, 14, 4), .access = access_pmceid },
2473 	{ CP15_PMU_SYS_REG(HI,     0, 9, 14, 5), .access = access_pmceid },
2474 	/* PMMIR */
2475 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
2476 
2477 	/* PRRR/MAIR0 */
2478 	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
2479 	/* NMRR/MAIR1 */
2480 	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
2481 	/* AMAIR0 */
2482 	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
2483 	/* AMAIR1 */
2484 	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
2485 
2486 	/* ICC_SRE */
2487 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
2488 
2489 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
2490 
2491 	/* Arch timers */
2492 	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
2493 	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
2494 
2495 	/* PMEVCNTRn */
2496 	PMU_PMEVCNTR(0),
2497 	PMU_PMEVCNTR(1),
2498 	PMU_PMEVCNTR(2),
2499 	PMU_PMEVCNTR(3),
2500 	PMU_PMEVCNTR(4),
2501 	PMU_PMEVCNTR(5),
2502 	PMU_PMEVCNTR(6),
2503 	PMU_PMEVCNTR(7),
2504 	PMU_PMEVCNTR(8),
2505 	PMU_PMEVCNTR(9),
2506 	PMU_PMEVCNTR(10),
2507 	PMU_PMEVCNTR(11),
2508 	PMU_PMEVCNTR(12),
2509 	PMU_PMEVCNTR(13),
2510 	PMU_PMEVCNTR(14),
2511 	PMU_PMEVCNTR(15),
2512 	PMU_PMEVCNTR(16),
2513 	PMU_PMEVCNTR(17),
2514 	PMU_PMEVCNTR(18),
2515 	PMU_PMEVCNTR(19),
2516 	PMU_PMEVCNTR(20),
2517 	PMU_PMEVCNTR(21),
2518 	PMU_PMEVCNTR(22),
2519 	PMU_PMEVCNTR(23),
2520 	PMU_PMEVCNTR(24),
2521 	PMU_PMEVCNTR(25),
2522 	PMU_PMEVCNTR(26),
2523 	PMU_PMEVCNTR(27),
2524 	PMU_PMEVCNTR(28),
2525 	PMU_PMEVCNTR(29),
2526 	PMU_PMEVCNTR(30),
2527 	/* PMEVTYPERn */
2528 	PMU_PMEVTYPER(0),
2529 	PMU_PMEVTYPER(1),
2530 	PMU_PMEVTYPER(2),
2531 	PMU_PMEVTYPER(3),
2532 	PMU_PMEVTYPER(4),
2533 	PMU_PMEVTYPER(5),
2534 	PMU_PMEVTYPER(6),
2535 	PMU_PMEVTYPER(7),
2536 	PMU_PMEVTYPER(8),
2537 	PMU_PMEVTYPER(9),
2538 	PMU_PMEVTYPER(10),
2539 	PMU_PMEVTYPER(11),
2540 	PMU_PMEVTYPER(12),
2541 	PMU_PMEVTYPER(13),
2542 	PMU_PMEVTYPER(14),
2543 	PMU_PMEVTYPER(15),
2544 	PMU_PMEVTYPER(16),
2545 	PMU_PMEVTYPER(17),
2546 	PMU_PMEVTYPER(18),
2547 	PMU_PMEVTYPER(19),
2548 	PMU_PMEVTYPER(20),
2549 	PMU_PMEVTYPER(21),
2550 	PMU_PMEVTYPER(22),
2551 	PMU_PMEVTYPER(23),
2552 	PMU_PMEVTYPER(24),
2553 	PMU_PMEVTYPER(25),
2554 	PMU_PMEVTYPER(26),
2555 	PMU_PMEVTYPER(27),
2556 	PMU_PMEVTYPER(28),
2557 	PMU_PMEVTYPER(29),
2558 	PMU_PMEVTYPER(30),
2559 	/* PMCCFILTR */
2560 	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
2561 
2562 	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
2563 	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
2564 
2565 	/* CCSIDR2 */
2566 	{ Op1(1), CRn( 0), CRm( 0),  Op2(2), undef_access },
2567 
2568 	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
2569 };
2570 
2571 static const struct sys_reg_desc cp15_64_regs[] = {
2572 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
2573 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
2574 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
2575 	{ SYS_DESC(SYS_AARCH32_CNTPCT),	      access_arch_timer },
2576 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
2577 	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
2578 	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
2579 	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
2580 	{ SYS_DESC(SYS_AARCH32_CNTPCTSS),     access_arch_timer },
2581 };
2582 
2583 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
2584 			       bool is_32)
2585 {
2586 	unsigned int i;
2587 
2588 	for (i = 0; i < n; i++) {
2589 		if (!is_32 && table[i].reg && !table[i].reset) {
2590 			kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
2591 			return false;
2592 		}
2593 
2594 		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2595 			kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
2596 			return false;
2597 		}
2598 	}
2599 
2600 	return true;
2601 }
2602 
2603 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
2604 {
2605 	kvm_inject_undefined(vcpu);
2606 	return 1;
2607 }
2608 
2609 static void perform_access(struct kvm_vcpu *vcpu,
2610 			   struct sys_reg_params *params,
2611 			   const struct sys_reg_desc *r)
2612 {
2613 	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
2614 
2615 	/* Check for regs disabled by runtime config */
2616 	if (sysreg_hidden(vcpu, r)) {
2617 		kvm_inject_undefined(vcpu);
2618 		return;
2619 	}
2620 
2621 	/*
2622 	 * Not having an accessor means that we have configured a trap
2623 	 * that we don't know how to handle. This certainly qualifies
2624 	 * as a gross bug that should be fixed right away.
2625 	 */
2626 	BUG_ON(!r->access);
2627 
2628 	/* Skip the instruction if the handler tells us to */
2629 	if (likely(r->access(vcpu, params, r)))
2630 		kvm_incr_pc(vcpu);
2631 }
2632 
2633 /*
2634  * emulate_cp -- tries to match a sys_reg access in a handling table, and
2635  *               calls the corresponding trap handler.
2636  *
2637  * @params: pointer to the descriptor of the access
2638  * @table: array of trap descriptors
2639  * @num: size of the trap descriptor array
2640  *
2641  * Return true if the access has been handled, false if not.
2642  */
2643 static bool emulate_cp(struct kvm_vcpu *vcpu,
2644 		       struct sys_reg_params *params,
2645 		       const struct sys_reg_desc *table,
2646 		       size_t num)
2647 {
2648 	const struct sys_reg_desc *r;
2649 
2650 	if (!table)
2651 		return false;	/* Not handled */
2652 
2653 	r = find_reg(params, table, num);
2654 
2655 	if (r) {
2656 		perform_access(vcpu, params, r);
2657 		return true;
2658 	}
2659 
2660 	/* Not handled */
2661 	return false;
2662 }
2663 
2664 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
2665 				struct sys_reg_params *params)
2666 {
2667 	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
2668 	int cp = -1;
2669 
2670 	switch (esr_ec) {
2671 	case ESR_ELx_EC_CP15_32:
2672 	case ESR_ELx_EC_CP15_64:
2673 		cp = 15;
2674 		break;
2675 	case ESR_ELx_EC_CP14_MR:
2676 	case ESR_ELx_EC_CP14_64:
2677 		cp = 14;
2678 		break;
2679 	default:
2680 		WARN_ON(1);
2681 	}
2682 
2683 	print_sys_reg_msg(params,
2684 			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
2685 			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2686 	kvm_inject_undefined(vcpu);
2687 }
2688 
2689 /**
2690  * kvm_handle_cp_64 -- handles an MRRC/MCRR trap on a guest CP14/CP15 access
2691  * @vcpu: The VCPU pointer
2692  * @global: The table of trap descriptors to search (@nr_global entries)
2693  */
2694 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
2695 			    const struct sys_reg_desc *global,
2696 			    size_t nr_global)
2697 {
2698 	struct sys_reg_params params;
2699 	u64 esr = kvm_vcpu_get_esr(vcpu);
2700 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
2701 	int Rt2 = (esr >> 10) & 0x1f;
2702 
2703 	params.CRm = (esr >> 1) & 0xf;
2704 	params.is_write = ((esr & 1) == 0);
2705 
2706 	params.Op0 = 0;
2707 	params.Op1 = (esr >> 16) & 0xf;
2708 	params.Op2 = 0;
2709 	params.CRn = 0;
2710 
2711 	/*
2712 	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
2713 	 * backends between AArch32 and AArch64, we get away with it.
2714 	 */
2715 	if (params.is_write) {
2716 		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
2717 		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
2718 	}
2719 
2720 	/*
2721 	 * If the table contains a handler, handle the access: in the case
2722 	 * of a read, split the 64bit result between Rt and Rt2, then
2723 	 * return with success.
2724 	 */
2725 	if (emulate_cp(vcpu, &params, global, nr_global)) {
2726 		/* Split up the value between registers for the read side */
2727 		if (!params.is_write) {
2728 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
2729 			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
2730 		}
2731 
2732 		return 1;
2733 	}
2734 
2735 	unhandled_cp_access(vcpu, &params);
2736 	return 1;
2737 }
2738 
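/*
 * A minimal sketch of the Rt/Rt2 packing used by kvm_handle_cp_64()
 * above. The helper is hypothetical and not built; it only illustrates
 * that the low 32 bits of the 64bit value travel in Rt and the high
 * 32 bits in Rt2, per the MRRC/MCRR transfer convention.
 */
#if 0
static u64 pack_cp_64(u32 rt_val, u32 rt2_val)
{
	return (u64)rt_val | ((u64)rt2_val << 32);
}
#endif
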
2739 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
2740 
2741 /*
2742  * The CP10 ID registers are architecturally mapped to AArch64 feature
2743  * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
2744  * from AArch32.
2745  */
2746 static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
2747 {
2748 	u8 reg_id = (esr >> 10) & 0xf;
2749 	bool valid;
2750 
2751 	params->is_write = ((esr & 1) == 0);
2752 	params->Op0 = 3;
2753 	params->Op1 = 0;
2754 	params->CRn = 0;
2755 	params->CRm = 3;
2756 
2757 	/* CP10 ID registers are read-only */
2758 	valid = !params->is_write;
2759 
2760 	switch (reg_id) {
2761 	/* MVFR0 */
2762 	case 0b0111:
2763 		params->Op2 = 0;
2764 		break;
2765 	/* MVFR1 */
2766 	case 0b0110:
2767 		params->Op2 = 1;
2768 		break;
2769 	/* MVFR2 */
2770 	case 0b0101:
2771 		params->Op2 = 2;
2772 		break;
2773 	default:
2774 		valid = false;
2775 	}
2776 
2777 	if (valid)
2778 		return true;
2779 
2780 	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
2781 		      params->is_write ? "write" : "read", reg_id);
2782 	return false;
2783 }
2784 
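/*
 * Example: a VMRS read of MVFR0 traps with reg_id == 0b0111; the rewrite
 * above yields { Op0=3, Op1=0, CRn=0, CRm=3, Op2=0 }, the AArch64
 * encoding of MVFR0_EL1, so the generic sys_reg path can emulate it.
 */
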
2785 /**
2786  * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
2787  *			  VFP Register' from AArch32.
2788  * @vcpu: The vCPU pointer
2789  *
2790  * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
2791  * Work out the correct AArch64 system register encoding and reroute to the
2792  * AArch64 system register emulation.
2793  */
2794 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
2795 {
2796 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
2797 	u64 esr = kvm_vcpu_get_esr(vcpu);
2798 	struct sys_reg_params params;
2799 
2800 	/* UNDEF on any unhandled register access */
2801 	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
2802 		kvm_inject_undefined(vcpu);
2803 		return 1;
2804 	}
2805 
2806 	if (emulate_sys_reg(vcpu, &params))
2807 		vcpu_set_reg(vcpu, Rt, params.regval);
2808 
2809 	return 1;
2810 }
2811 
2812 /**
2813  * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
2814  *			       CRn=0, which corresponds to the AArch32 feature
2815  *			       registers.
2816  * @vcpu: the vCPU pointer
2817  * @params: the system register access parameters.
2818  *
2819  * Our cp15 system register tables do not enumerate the AArch32 feature
2820  * registers. Conveniently, our AArch64 table does, and the AArch32 system
2821  * register encoding can be trivially remapped into the AArch64 for the feature
2822  * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
2823  *
2824  * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
2825  * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
2826  * range are either UNKNOWN or RES0. Rerouting remains architecturally
2827  * valid, as we treat undefined registers in this range as RAZ.
2828  */
2829 static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
2830 				   struct sys_reg_params *params)
2831 {
2832 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
2833 
2834 	/* Treat impossible writes to RO registers as UNDEFINED */
2835 	if (params->is_write) {
2836 		unhandled_cp_access(vcpu, params);
2837 		return 1;
2838 	}
2839 
2840 	params->Op0 = 3;
2841 
2842 	/*
2843 	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
2844 	 * Avoid conflicting with future expansion of AArch64 feature registers
2845 	 * and simply treat them as RAZ here.
2846 	 */
2847 	if (params->CRm > 3)
2848 		params->regval = 0;
2849 	else if (!emulate_sys_reg(vcpu, params))
2850 		return 1;
2851 
2852 	vcpu_set_reg(vcpu, Rt, params->regval);
2853 	return 1;
2854 }
2855 
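/*
 * Example of the rerouting described above: an MRC read of ID_ISAR0
 * (coproc=15, Op1=0, CRn=0, CRm=2, Op2=0) gains Op0=3 and becomes the
 * AArch64 ID_ISAR0_EL1 encoding { Op0=3, Op1=0, CRn=0, CRm=2, Op2=0 },
 * which emulate_sys_reg() resolves through sys_reg_descs[].
 */
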
2856 /**
2857  * kvm_handle_cp_32 -- handles an MRC/MCR trap on a guest CP14/CP15 access
2858  * @vcpu: The VCPU pointer
2859  * @params: Decoded access parameters, matched against @global (@nr_global entries)
2860  */
2861 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
2862 			    struct sys_reg_params *params,
2863 			    const struct sys_reg_desc *global,
2864 			    size_t nr_global)
2865 {
2866 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
2867 
2868 	params->regval = vcpu_get_reg(vcpu, Rt);
2869 
2870 	if (emulate_cp(vcpu, params, global, nr_global)) {
2871 		if (!params->is_write)
2872 			vcpu_set_reg(vcpu, Rt, params->regval);
2873 		return 1;
2874 	}
2875 
2876 	unhandled_cp_access(vcpu, params);
2877 	return 1;
2878 }
2879 
2880 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
2881 {
2882 	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
2883 }
2884 
2885 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
2886 {
2887 	struct sys_reg_params params;
2888 
2889 	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
2890 
2891 	/*
2892 	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
2893 	 * system register table. Registers in the ID range where CRm=0 are
2894 	 * excluded from this scheme as they do not trivially map into AArch64
2895 	 * system register encodings.
2896 	 */
2897 	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
2898 		return kvm_emulate_cp15_id_reg(vcpu, &params);
2899 
2900 	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
2901 }
2902 
2903 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
2904 {
2905 	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
2906 }
2907 
2908 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
2909 {
2910 	struct sys_reg_params params;
2911 
2912 	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
2913 
2914 	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
2915 }
2916 
2917 static bool is_imp_def_sys_reg(struct sys_reg_params *params)
2918 {
2919 	// See ARM DDI 0487E.a, section D12.3.2
2920 	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
2921 }
2922 
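/*
 * The CRn test above matches exactly CRn == 0b1011 and CRn == 0b1111:
 * bits 0, 1 and 3 must be set and bit 2 is left free, covering the two
 * CRn values reserved for IMPLEMENTATION DEFINED registers when
 * Op0 == 3.
 */
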
2923 /**
2924  * emulate_sys_reg - Emulate a guest access to an AArch64 system register
2925  * @vcpu: The VCPU pointer
2926  * @params: Decoded system register parameters
2927  *
2928  * Return: true if the system register access was successful, false otherwise.
2929  */
2930 static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
2931 			   struct sys_reg_params *params)
2932 {
2933 	const struct sys_reg_desc *r;
2934 
2935 	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2936 
2937 	if (likely(r)) {
2938 		perform_access(vcpu, params, r);
2939 		return true;
2940 	}
2941 
2942 	if (is_imp_def_sys_reg(params)) {
2943 		kvm_inject_undefined(vcpu);
2944 	} else {
2945 		print_sys_reg_msg(params,
2946 				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
2947 				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2948 		kvm_inject_undefined(vcpu);
2949 	}
2950 	return false;
2951 }
2952 
2953 /**
2954  * kvm_reset_sys_regs - sets system registers to their reset values
2955  * @vcpu: The VCPU pointer
2956  *
2957  * This function walks the sys_reg_descs table above and sets the registers on the
2958  * virtual CPU struct to their architecturally defined reset values.
2959  */
2960 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2961 {
2962 	unsigned long i;
2963 
2964 	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
2965 		if (sys_reg_descs[i].reset)
2966 			sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
2967 }
2968 
2969 /**
2970  * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
2971  * kvm_handle_sys_reg -- handles an MRS/MSR trap on a guest sys_reg access
2972  */
2973 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
2974 {
2975 	struct sys_reg_params params;
2976 	unsigned long esr = kvm_vcpu_get_esr(vcpu);
2977 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
2978 
2979 	trace_kvm_handle_sys_reg(esr);
2980 
2981 	params = esr_sys64_to_params(esr);
2982 	params.regval = vcpu_get_reg(vcpu, Rt);
2983 
2984 	if (!emulate_sys_reg(vcpu, &params))
2985 		return 1;
2986 
2987 	if (!params.is_write)
2988 		vcpu_set_reg(vcpu, Rt, params.regval);
2989 	return 1;
2990 }
2991 
2992 /******************************************************************************
2993  * Userspace API
2994  *****************************************************************************/
2995 
2996 static bool index_to_params(u64 id, struct sys_reg_params *params)
2997 {
2998 	switch (id & KVM_REG_SIZE_MASK) {
2999 	case KVM_REG_SIZE_U64:
3000 		/* Any unused index bits mean it's not valid. */
3001 		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
3002 			      | KVM_REG_ARM_COPROC_MASK
3003 			      | KVM_REG_ARM64_SYSREG_OP0_MASK
3004 			      | KVM_REG_ARM64_SYSREG_OP1_MASK
3005 			      | KVM_REG_ARM64_SYSREG_CRN_MASK
3006 			      | KVM_REG_ARM64_SYSREG_CRM_MASK
3007 			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
3008 			return false;
3009 		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
3010 			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
3011 		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
3012 			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
3013 		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
3014 			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
3015 		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
3016 			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
3017 		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
3018 			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
3019 		return true;
3020 	default:
3021 		return false;
3022 	}
3023 }
3024 
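/*
 * Example of the index layout decoded above: the userspace index for
 * SCTLR_EL1 (Op0=3, Op1=0, CRn=1, CRm=0, Op2=0) is
 *
 *   KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
 *   (3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
 *   (1 << KVM_REG_ARM64_SYSREG_CRN_SHIFT)
 *
 * which index_to_params() splits back into the individual fields, and
 * sys_reg_to_index() below reassembles.
 */
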
3025 const struct sys_reg_desc *get_reg_by_id(u64 id,
3026 					 const struct sys_reg_desc table[],
3027 					 unsigned int num)
3028 {
3029 	struct sys_reg_params params;
3030 
3031 	if (!index_to_params(id, &params))
3032 		return NULL;
3033 
3034 	return find_reg(&params, table, num);
3035 }
3036 
3037 /* Decode an index value, and find the sys_reg_desc entry. */
3038 static const struct sys_reg_desc *
3039 id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
3040 		   const struct sys_reg_desc table[], unsigned int num)
3042 {
3043 	const struct sys_reg_desc *r;
3044 
3045 	/* We only do sys_reg for now. */
3046 	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
3047 		return NULL;
3048 
3049 	r = get_reg_by_id(id, table, num);
3050 
3051 	/* Not saved in the sys_reg array and not otherwise accessible? */
3052 	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
3053 		r = NULL;
3054 
3055 	return r;
3056 }
3057 
3058 /*
3059  * These are the invariant system registers: we let the guest see the
3060  * host versions of these, so they're part of the guest state.
3061  *
3062  * A future CPU may provide a mechanism to present different values to
3063  * the guest, or a future kvm may trap them.
3064  */
3065 
3066 #define FUNCTION_INVARIANT(reg)						\
3067 	static void get_##reg(struct kvm_vcpu *v,			\
3068 			      const struct sys_reg_desc *r)		\
3069 	{								\
3070 		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
3071 	}
3072 
3073 FUNCTION_INVARIANT(midr_el1)
3074 FUNCTION_INVARIANT(revidr_el1)
3075 FUNCTION_INVARIANT(aidr_el1)
3076 
3077 static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
3078 {
3079 	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
3080 }
3081 
3082 /* ->val is filled in by kvm_sys_reg_table_init() */
3083 static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
3084 	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
3085 	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
3086 	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
3087 	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
3088 };
3089 
3090 static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
3091 {
3092 	const struct sys_reg_desc *r;
3093 
3094 	r = get_reg_by_id(id, invariant_sys_regs,
3095 			  ARRAY_SIZE(invariant_sys_regs));
3096 	if (!r)
3097 		return -ENOENT;
3098 
3099 	return put_user(r->val, uaddr);
3100 }
3101 
3102 static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
3103 {
3104 	const struct sys_reg_desc *r;
3105 	u64 val;
3106 
3107 	r = get_reg_by_id(id, invariant_sys_regs,
3108 			  ARRAY_SIZE(invariant_sys_regs));
3109 	if (!r)
3110 		return -ENOENT;
3111 
3112 	if (get_user(val, uaddr))
3113 		return -EFAULT;
3114 
3115 	/* This is what we mean by invariant: you can't change it. */
3116 	if (r->val != val)
3117 		return -EINVAL;
3118 
3119 	return 0;
3120 }
3121 
3122 static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
3123 {
3124 	u32 val;
3125 	u32 __user *uval = uaddr;
3126 
3127 	/* Fail if we have unknown bits set. */
3128 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
3129 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
3130 		return -ENOENT;
3131 
3132 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
3133 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
3134 		if (KVM_REG_SIZE(id) != 4)
3135 			return -ENOENT;
3136 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
3137 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
3138 		if (val >= CSSELR_MAX)
3139 			return -ENOENT;
3140 
3141 		return put_user(get_ccsidr(vcpu, val), uval);
3142 	default:
3143 		return -ENOENT;
3144 	}
3145 }
3146 
3147 static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
3148 {
3149 	u32 val, newval;
3150 	u32 __user *uval = uaddr;
3151 
3152 	/* Fail if we have unknown bits set. */
3153 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
3154 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
3155 		return -ENOENT;
3156 
3157 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
3158 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
3159 		if (KVM_REG_SIZE(id) != 4)
3160 			return -ENOENT;
3161 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
3162 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
3163 		if (val >= CSSELR_MAX)
3164 			return -ENOENT;
3165 
3166 		if (get_user(newval, uval))
3167 			return -EFAULT;
3168 
3169 		return set_ccsidr(vcpu, val, newval);
3170 	default:
3171 		return -ENOENT;
3172 	}
3173 }
3174 
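/*
 * Example: userspace reads the CCSIDR for CSSELR value 2 with the
 * demux id
 *
 *   KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 *   KVM_REG_ARM_DEMUX_ID_CCSIDR | 2
 *
 * which demux_c15_get() above resolves to get_ccsidr(vcpu, 2). This is
 * exactly the shape of the indices emitted by write_demux_regids()
 * below.
 */
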
3175 int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
3176 			 const struct sys_reg_desc table[], unsigned int num)
3177 {
3178 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
3179 	const struct sys_reg_desc *r;
3180 	u64 val;
3181 	int ret;
3182 
3183 	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
3184 	if (!r || sysreg_hidden_user(vcpu, r))
3185 		return -ENOENT;
3186 
3187 	if (r->get_user) {
3188 		ret = (r->get_user)(vcpu, r, &val);
3189 	} else {
3190 		val = __vcpu_sys_reg(vcpu, r->reg);
3191 		ret = 0;
3192 	}
3193 
3194 	if (!ret)
3195 		ret = put_user(val, uaddr);
3196 
3197 	return ret;
3198 }
3199 
3200 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
3201 {
3202 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
3203 	int err;
3204 
3205 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
3206 		return demux_c15_get(vcpu, reg->id, uaddr);
3207 
3208 	err = get_invariant_sys_reg(reg->id, uaddr);
3209 	if (err != -ENOENT)
3210 		return err;
3211 
3212 	return kvm_sys_reg_get_user(vcpu, reg,
3213 				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
3214 }
3215 
3216 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
3217 			 const struct sys_reg_desc table[], unsigned int num)
3218 {
3219 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
3220 	const struct sys_reg_desc *r;
3221 	u64 val;
3222 	int ret;
3223 
3224 	if (get_user(val, uaddr))
3225 		return -EFAULT;
3226 
3227 	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
3228 	if (!r || sysreg_hidden_user(vcpu, r))
3229 		return -ENOENT;
3230 
3231 	if (sysreg_user_write_ignore(vcpu, r))
3232 		return 0;
3233 
3234 	if (r->set_user) {
3235 		ret = (r->set_user)(vcpu, r, val);
3236 	} else {
3237 		__vcpu_sys_reg(vcpu, r->reg) = val;
3238 		ret = 0;
3239 	}
3240 
3241 	return ret;
3242 }
3243 
3244 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
3245 {
3246 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
3247 	int err;
3248 
3249 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
3250 		return demux_c15_set(vcpu, reg->id, uaddr);
3251 
3252 	err = set_invariant_sys_reg(reg->id, uaddr);
3253 	if (err != -ENOENT)
3254 		return err;
3255 
3256 	return kvm_sys_reg_set_user(vcpu, reg,
3257 				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
3258 }
3259 
3260 static unsigned int num_demux_regs(void)
3261 {
3262 	return CSSELR_MAX;
3263 }
3264 
3265 static int write_demux_regids(u64 __user *uindices)
3266 {
3267 	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
3268 	unsigned int i;
3269 
3270 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
3271 	for (i = 0; i < CSSELR_MAX; i++) {
3272 		if (put_user(val | i, uindices))
3273 			return -EFAULT;
3274 		uindices++;
3275 	}
3276 	return 0;
3277 }
3278 
3279 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
3280 {
3281 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
3282 		KVM_REG_ARM64_SYSREG |
3283 		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
3284 		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
3285 		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
3286 		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
3287 		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
3288 }
3289 
3290 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
3291 {
3292 	if (!*uind)
3293 		return true;
3294 
3295 	if (put_user(sys_reg_to_index(reg), *uind))
3296 		return false;
3297 
3298 	(*uind)++;
3299 	return true;
3300 }
3301 
3302 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
3303 			    const struct sys_reg_desc *rd,
3304 			    u64 __user **uind,
3305 			    unsigned int *total)
3306 {
3307 	/*
3308 	 * Ignore registers we trap but don't save,
3309 	 * and for which no custom user accessor is provided.
3310 	 */
3311 	if (!(rd->reg || rd->get_user))
3312 		return 0;
3313 
3314 	if (sysreg_hidden_user(vcpu, rd))
3315 		return 0;
3316 
3317 	if (!copy_reg_to_user(rd, uind))
3318 		return -EFAULT;
3319 
3320 	(*total)++;
3321 	return 0;
3322 }
3323 
3324 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
3325 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
3326 {
3327 	const struct sys_reg_desc *i2, *end2;
3328 	unsigned int total = 0;
3329 	int err;
3330 
3331 	i2 = sys_reg_descs;
3332 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
3333 
3334 	while (i2 != end2) {
3335 		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
3336 		if (err)
3337 			return err;
3338 	}
3339 	return total;
3340 }
3341 
3342 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
3343 {
3344 	return ARRAY_SIZE(invariant_sys_regs)
3345 		+ num_demux_regs()
3346 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
3347 }
3348 
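/*
 * Note the counting trick: kvm_arm_num_sys_reg_descs() walks the table
 * with a NULL buffer, in which case copy_reg_to_user() copies nothing
 * and walk_sys_regs() merely returns how many indices it would have
 * written.
 */
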
3349 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
3350 {
3351 	unsigned int i;
3352 	int err;
3353 
3354 	/* First give them all the invariant registers' indices. */
3355 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
3356 		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
3357 			return -EFAULT;
3358 		uindices++;
3359 	}
3360 
3361 	err = walk_sys_regs(vcpu, uindices);
3362 	if (err < 0)
3363 		return err;
3364 	uindices += err;
3365 
3366 	return write_demux_regids(uindices);
3367 }
3368 
3369 int __init kvm_sys_reg_table_init(void)
3370 {
3371 	bool valid = true;
3372 	unsigned int i;
3373 
3374 	/* Make sure tables are unique and in order. */
3375 	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
3376 	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
3377 	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
3378 	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
3379 	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
3380 	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
3381 
3382 	if (!valid)
3383 		return -EINVAL;
3384 
3385 	/* We abuse the reset function to overwrite the table itself. */
3386 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
3387 		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
3388 
3389 	return 0;
3390 }
3391