// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	 __vcpu_sys_reg(vcpu, reg) = val;
}
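
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a hypothetical trap handler only ever goes through the two accessors
 * above, so it works whether the register currently lives in the vcpu
 * context in memory or in the physical CPU. Handler name and register
 * choice are made up for illustration.
 */
static bool __maybe_unused demo_trap_tpidr(struct kvm_vcpu *vcpu,
					   struct sys_reg_params *p)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, TPIDR_EL1);
	else
		p->regval = vcpu_read_sys_reg(vcpu, TPIDR_EL1);

	return true;
}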

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
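
/*
 * Illustrative sketch (editor's addition, hypothetical helper): the
 * KVM_REG_ARM_DEMUX_ID_CCSIDR interface effectively snapshots one
 * CCSIDR per CSSELR value, roughly along these lines.
 */
static void __maybe_unused demo_snapshot_ccsidrs(u32 *out)
{
	u32 csselr;

	/* out[] is assumed to hold at least CSSELR_MAX entries */
	for (csselr = 0; csselr < CSSELR_MAX; csselr++)
		out[csselr] = get_ccsidr(csselr);
}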

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* See the 32bit mapping in kvm_host.h */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		val = vcpu_read_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}
	vcpu_write_sys_reg(vcpu, val, reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
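
/*
 * Worked example (editor's addition, hypothetical helper): shows how the
 * 32-bit halves are merged above. With a stored value of
 * 0x1111111122222222, writing 0xaaaaaaaa to the odd half yields
 * 0xaaaaaaaa22222222, and writing it to the even half yields
 * 0x11111111aaaaaaaa.
 */
static u64 __maybe_unused demo_merge_cp15_half(u64 stored, u32 regval,
					       bool odd)
{
	if (odd)	/* odd register carries bits [63:32] */
		return ((u64)regval << 32) | lower_32_bits(stored);

	/* even register carries bits [31:0] */
	return ((u64)upper_32_bits(stored) << 32) | regval;
}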

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1);

	if (p->is_aarch32) {
		if (r->Op2 & 2)
			p->regval = upper_32_bits(p->regval);
		else
			p->regval = lower_32_bits(p->regval);
	}

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->is_aarch32) {
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being an RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);

	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
429 
430 /*
431  * reg_to_dbg/dbg_to_reg
432  *
433  * A 32 bit write to a debug register leave top bits alone
434  * A 32 bit read from a debug register only returns the bottom bits
435  *
436  * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
437  * hyp.S code switches between host and guest values in future.
438  */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}
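
/*
 * Worked example (editor's addition, hypothetical helper): a 32-bit
 * write of 0xdeadbeef over a stored value of 0xcafe0000f00df00d
 * produces 0xcafe0000deadbeef, matching reg_to_dbg() above.
 */
static u64 __maybe_unused demo_dbg_merge32(u64 stored, u32 written)
{
	/* keep the stored top half, take the bottom half from the write */
	return (stored & GENMASK_ULL(63, 32)) | written;
}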

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
	const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
	const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
	const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
	const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We cap affinity level 0 at 16 VCPUs because the
	 * ICC_SGIxR registers of the GICv3 can only address 16 CPUs at
	 * that level, and we want to be able to address each CPU
	 * directly when sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
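
/*
 * Worked example (editor's addition): vcpu_id 0x1234 maps to
 * Aff0 = 0x4 (0x1234 & 0xf), Aff1 = 0x23 ((0x1234 >> 4) & 0xff) and
 * Aff2 = 0x01 ((0x1234 >> 12) & 0xff), with bit 31 set on top.
 */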

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to
	 * UNKNOWN, except for PMCR.E, which resets to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	if (!system_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;
	__vcpu_sys_reg(vcpu, r->reg) = val;
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
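
/*
 * Usage note (editor's addition): a handler typically gates itself with
 * one of the helpers above, e.g.
 *
 *	if (pmu_access_cycle_counter_el0_disabled(vcpu))
 *		return false;	UNDEF has already been injected
 *
 * i.e. the helper both checks the PMUSERENR_EL0 gate and injects the
 * exception, so callers only need to bail out.
 */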

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!system_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
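
/*
 * Worked example (editor's addition, hypothetical helper): with
 * PMCR_EL0 = 0x6e3040, PMCR.N = (0x6e3040 >> 11) & 0x1f = 6, so event
 * counter indices 0..5 plus ARMV8_PMU_CYCLE_IDX are valid. The check
 * above is equivalent to:
 */
static bool __maybe_unused demo_idx_valid(u64 pmcr, u64 idx)
{
	u64 n = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;

	return idx < n || idx == ARMV8_PMU_CYCLE_IDX;
}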

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
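
/*
 * Worked example (editor's addition): PMEVCNTRn_EL0 encodes the counter
 * index in CRm[1:0] and Op2[2:0]. For PMEVCNTR19_EL0 (CRm = 0b1010,
 * Op2 = 0b011), idx = ((0b10) << 3) | 0b011 = 19, matching the
 * extraction above.
 */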

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

#define reg_to_encoding(x)						\
	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
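
/*
 * Worked example (editor's addition): for PMCR_EL0 (Op0=3, Op1=3, CRn=9,
 * CRm=12, Op2=0), reg_to_encoding() packs the fields exactly like the
 * sys_reg() macro used throughout this file, so the result can be
 * compared directly against the SYS_* encodings.
 */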

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0,  get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),					\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)

static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	default:
		BUG();
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1) {
		if (!vcpu_has_sve(vcpu))
			val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
		val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
		val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT);
		val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT);
	} else if (id == SYS_ID_AA64PFR1_EL1) {
		val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
	} else if (id == SYS_ID_AA64DFR0_EL1) {
		/* Limit guests to PMUv3 for ARMv8.1 */
		val = cpuid_feature_cap_perfmon_field(val,
						ID_AA64DFR0_PMUVER_SHIFT,
						ID_AA64DFR0_PMUVER_8_1);
	} else if (id == SYS_ID_DFR0_EL1) {
		/* Limit guests to PMUv3 for ARMv8.1 */
		val = cpuid_feature_cap_perfmon_field(val,
						ID_DFR0_PERFMON_SHIFT,
						ID_DFR0_PERFMON_8_1);
	}

	return val;
}
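
/*
 * Illustrative sketch (editor's addition, hypothetical helper): each ID
 * register field is 4 bits wide, so hiding a feature from the guest is
 * a matter of clearing the right nibble, as done for SVE/AMU/CSV2 above.
 */
static u64 __maybe_unused demo_clear_id_field(u64 val, unsigned int shift)
{
	return val & ~(0xfUL << shift);
}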

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r, raz);
	return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool raz = sysreg_visible_as_raz(vcpu, r);

	return __access_id_reg(vcpu, p, r, raz);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       const struct kvm_one_reg *reg, void __user *uaddr)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;
	u8 csv2;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/*
	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
	 * it doesn't promise more than what is actually provided (the
	 * guest could otherwise be covered in ectoplasmic residue).
	 */
	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
	if (csv2 > 1 ||
	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* We can only differ with CSV2, and anything else is an error */
	val ^= read_id_reg(vcpu, rd, false);
	val &= ~(0xFUL << ID_AA64PFR0_CSV2_SHIFT);
	if (val)
		return -EINVAL;

	vcpu->kvm->arch.pfr0_csv2 = csv2;

	return 0;
}
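
/*
 * Note on the XOR trick above (editor's addition): val ^ read_id_reg()
 * leaves a set bit wherever userspace's value differs from the sanitised
 * one; clearing the CSV2 field afterwards means any remaining difference
 * makes the result non-zero and the write is rejected.
 */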

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(vcpu, rd, raz);

	return reg_to_user(uaddr, &val, id);
}

static int __set_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(vcpu, rd, raz))
		return -EINVAL;

	return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	bool raz = sysreg_visible_as_raz(vcpu, rd);

	return __get_id_reg(vcpu, rd, uaddr, raz);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	bool raz = sysreg_visible_as_raz(vcpu, rd);

	return __set_id_reg(vcpu, rd, uaddr, raz);
}

static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(vcpu, rd, uaddr, true);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, true);
}

static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sysreg(clidr_el1);
	return true;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	/* See the 32bit mapping in kvm_host.h */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	p->regval = get_ccsidr(csselr);

	/*
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time.
	 * To prevent this trapping from causing performance problems, let's
	 * expose the geometry of all data and unified caches (which are
	 * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 */
	if (!(csselr & 1)) // data or unified cache
		p->regval &= ~GENMASK(27, 3);
	return true;
}
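
/*
 * Worked example (editor's addition): in the pre-FEAT_CCIDX CCSIDR_EL1
 * layout assumed here, LineSize lives in bits [2:0] while Associativity
 * and NumSets occupy bits [27:3], so clearing GENMASK(27, 3) reports
 * 1 set and 1 way but keeps a truthful line size.
 */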

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all, debug-related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug features, nor any
 * of the OSLock protocol. This should be revisited if we ever encounter
 * a more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_SANITISED(ID_AA64ZFR0_EL1),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_UNALLOCATED(6,2),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	{ SYS_DESC(SYS_RGSR_EL1), undef_access },
	{ SYS_DESC(SYS_GCR_EL1), undef_access },

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_TFSR_EL1), undef_access },
	{ SYS_DESC(SYS_TFSRE0_EL1), undef_access },

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },

	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
	AMU_AMEVCNTR0_EL0(0),
	AMU_AMEVCNTR0_EL0(1),
	AMU_AMEVCNTR0_EL0(2),
	AMU_AMEVCNTR0_EL0(3),
	AMU_AMEVCNTR0_EL0(4),
	AMU_AMEVCNTR0_EL0(5),
	AMU_AMEVCNTR0_EL0(6),
	AMU_AMEVCNTR0_EL0(7),
	AMU_AMEVCNTR0_EL0(8),
	AMU_AMEVCNTR0_EL0(9),
	AMU_AMEVCNTR0_EL0(10),
	AMU_AMEVCNTR0_EL0(11),
	AMU_AMEVCNTR0_EL0(12),
	AMU_AMEVCNTR0_EL0(13),
	AMU_AMEVCNTR0_EL0(14),
	AMU_AMEVCNTR0_EL0(15),
	AMU_AMEVTYPER0_EL0(0),
	AMU_AMEVTYPER0_EL0(1),
	AMU_AMEVTYPER0_EL0(2),
	AMU_AMEVTYPER0_EL0(3),
	AMU_AMEVTYPER0_EL0(4),
	AMU_AMEVTYPER0_EL0(5),
	AMU_AMEVTYPER0_EL0(6),
	AMU_AMEVTYPER0_EL0(7),
	AMU_AMEVTYPER0_EL0(8),
	AMU_AMEVTYPER0_EL0(9),
	AMU_AMEVTYPER0_EL0(10),
	AMU_AMEVTYPER0_EL0(11),
	AMU_AMEVTYPER0_EL0(12),
	AMU_AMEVTYPER0_EL0(13),
	AMU_AMEVTYPER0_EL0(14),
	AMU_AMEVTYPER0_EL0(15),
	AMU_AMEVCNTR1_EL0(0),
	AMU_AMEVCNTR1_EL0(1),
	AMU_AMEVCNTR1_EL0(2),
	AMU_AMEVCNTR1_EL0(3),
	AMU_AMEVCNTR1_EL0(4),
	AMU_AMEVCNTR1_EL0(5),
	AMU_AMEVCNTR1_EL0(6),
	AMU_AMEVCNTR1_EL0(7),
	AMU_AMEVCNTR1_EL0(8),
	AMU_AMEVCNTR1_EL0(9),
	AMU_AMEVCNTR1_EL0(10),
	AMU_AMEVCNTR1_EL0(11),
	AMU_AMEVCNTR1_EL0(12),
	AMU_AMEVCNTR1_EL0(13),
	AMU_AMEVCNTR1_EL0(14),
	AMU_AMEVCNTR1_EL0(15),
	AMU_AMEVTYPER1_EL0(0),
	AMU_AMEVTYPER1_EL0(1),
	AMU_AMEVTYPER1_EL0(2),
	AMU_AMEVTYPER1_EL0(3),
	AMU_AMEVTYPER1_EL0(4),
	AMU_AMEVTYPER1_EL0(5),
	AMU_AMEVTYPER1_EL0(6),
	AMU_AMEVTYPER1_EL0(7),
	AMU_AMEVTYPER1_EL0(8),
	AMU_AMEVTYPER1_EL0(9),
	AMU_AMEVTYPER1_EL0(10),
	AMU_AMEVTYPER1_EL0(11),
	AMU_AMEVTYPER1_EL0(12),
	AMU_AMEVTYPER1_EL0(13),
	AMU_AMEVTYPER1_EL0(14),
	AMU_AMEVTYPER1_EL0(15),

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};
1784 
1785 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1786 			struct sys_reg_params *p,
1787 			const struct sys_reg_desc *r)
1788 {
1789 	if (p->is_write) {
1790 		return ignore_write(vcpu, p);
1791 	} else {
1792 		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1793 		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1794 		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
1795 
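		/*
		 * Compose a 32bit DBGIDR view from the sanitised 64bit ID
		 * registers: WRPs at [31:28], BRPs at [27:24], CTX_CMPs at
		 * [23:20], and debug architecture version 6 (ARMv8) at
		 * [19:16]. The two el3-dependent bits below are, assuming
		 * the usual DBGIDR layout, NSUHD_imp (14) and SE_imp (12).
		 */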
1796 		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
1797 			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
1798 			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
1799 			     (6 << 16) | (el3 << 14) | (el3 << 12));
1800 		return true;
1801 	}
1802 }
1803 
1804 static bool trap_debug32(struct kvm_vcpu *vcpu,
1805 			 struct sys_reg_params *p,
1806 			 const struct sys_reg_desc *r)
1807 {
1808 	if (p->is_write) {
1809 		vcpu_cp14(vcpu, r->reg) = p->regval;
1810 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1811 	} else {
1812 		p->regval = vcpu_cp14(vcpu, r->reg);
1813 	}
1814 
1815 	return true;
1816 }
1817 
1818 /* AArch32 debug register mappings
1819  *
1820  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
1821  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
1822  *
1823  * All control registers and watchpoint value registers are mapped to
1824  * the lower 32 bits of their AArch64 equivalents. We share the trap
1825  * handlers with the above AArch64 code which checks what mode the
1826  * system is in.
1827  */
1828 
1829 static bool trap_xvr(struct kvm_vcpu *vcpu,
1830 		     struct sys_reg_params *p,
1831 		     const struct sys_reg_desc *rd)
1832 {
1833 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
1834 
1835 	if (p->is_write) {
1836 		u64 val = *dbg_reg;
1837 
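		/* Keep the AArch32 DBGBVRn in [31:0]; replace [63:32], the XVR. */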
1838 		val &= 0xffffffffUL;
1839 		val |= p->regval << 32;
1840 		*dbg_reg = val;
1841 
1842 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1843 	} else {
1844 		p->regval = *dbg_reg >> 32;
1845 	}
1846 
1847 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
1848 
1849 	return true;
1850 }
1851 
1852 #define DBG_BCR_BVR_WCR_WVR(n)						\
1853 	/* DBGBVRn */							\
1854 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, 	\
1855 	/* DBGBCRn */							\
1856 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
1857 	/* DBGWVRn */							\
1858 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
1859 	/* DBGWCRn */							\
1860 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1861 
1862 #define DBGBXVR(n)							\
1863 	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
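
/*
 * Illustration: DBG_BCR_BVR_WCR_WVR(1) expands to the four entries for
 * DBGBVR1/DBGBCR1/DBGWVR1/DBGWCR1 (CRm(1), Op2 4..7), all sharing the
 * AArch64 trap handlers with register index 1, while DBGBXVR(1) maps
 * the upper half of DBGBVR1_EL1 via trap_xvr.
 */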
1864 
1865 /*
1866  * Trapped cp14 registers. We generally ignore most of the external
1867  * debug registers, on the principle that they don't really make sense
1868  * to a guest. Revisit this one day, should this principle change.
1869  */
1870 static const struct sys_reg_desc cp14_regs[] = {
1871 	/* DBGIDR */
1872 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
1873 	/* DBGDTRRXext */
1874 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
1875 
1876 	DBG_BCR_BVR_WCR_WVR(0),
1877 	/* DBGDSCRint */
1878 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
1879 	DBG_BCR_BVR_WCR_WVR(1),
1880 	/* DBGDCCINT */
1881 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
1882 	/* DBGDSCRext */
1883 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
1884 	DBG_BCR_BVR_WCR_WVR(2),
1885 	/* DBGDTR[RT]Xint */
1886 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
1887 	/* DBGDTR[RT]Xext */
1888 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
1889 	DBG_BCR_BVR_WCR_WVR(3),
1890 	DBG_BCR_BVR_WCR_WVR(4),
1891 	DBG_BCR_BVR_WCR_WVR(5),
1892 	/* DBGWFAR */
1893 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
1894 	/* DBGOSECCR */
1895 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
1896 	DBG_BCR_BVR_WCR_WVR(6),
1897 	/* DBGVCR */
1898 	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
1899 	DBG_BCR_BVR_WCR_WVR(7),
1900 	DBG_BCR_BVR_WCR_WVR(8),
1901 	DBG_BCR_BVR_WCR_WVR(9),
1902 	DBG_BCR_BVR_WCR_WVR(10),
1903 	DBG_BCR_BVR_WCR_WVR(11),
1904 	DBG_BCR_BVR_WCR_WVR(12),
1905 	DBG_BCR_BVR_WCR_WVR(13),
1906 	DBG_BCR_BVR_WCR_WVR(14),
1907 	DBG_BCR_BVR_WCR_WVR(15),
1908 
1909 	/* DBGDRAR (32bit) */
1910 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
1911 
1912 	DBGBXVR(0),
1913 	/* DBGOSLAR */
1914 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
1915 	DBGBXVR(1),
1916 	/* DBGOSLSR */
1917 	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
1918 	DBGBXVR(2),
1919 	DBGBXVR(3),
1920 	/* DBGOSDLR */
1921 	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
1922 	DBGBXVR(4),
1923 	/* DBGPRCR */
1924 	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
1925 	DBGBXVR(5),
1926 	DBGBXVR(6),
1927 	DBGBXVR(7),
1928 	DBGBXVR(8),
1929 	DBGBXVR(9),
1930 	DBGBXVR(10),
1931 	DBGBXVR(11),
1932 	DBGBXVR(12),
1933 	DBGBXVR(13),
1934 	DBGBXVR(14),
1935 	DBGBXVR(15),
1936 
1937 	/* DBGDSAR (32bit) */
1938 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
1939 
1940 	/* DBGDEVID2 */
1941 	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
1942 	/* DBGDEVID1 */
1943 	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
1944 	/* DBGDEVID */
1945 	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
1946 	/* DBGCLAIMSET */
1947 	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
1948 	/* DBGCLAIMCLR */
1949 	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
1950 	/* DBGAUTHSTATUS */
1951 	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
1952 };
1953 
1954 /* Trapped cp14 64bit registers */
1955 static const struct sys_reg_desc cp14_64_regs[] = {
1956 	/* DBGDRAR (64bit) */
1957 	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
1958 
1959 	/* DBGDSAR (64bit) */
1960 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
1961 };
1962 
1963 /* Macro to expand the PMEVCNTRn register */
1964 #define PMU_PMEVCNTR(n)							\
1965 	/* PMEVCNTRn */							\
1966 	{ Op1(0), CRn(0b1110),						\
1967 	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
1968 	  access_pmu_evcntr }
1969 
1970 /* Macro to expand the PMEVTYPERn register */
1971 #define PMU_PMEVTYPER(n)						\
1972 	/* PMEVTYPERn */						\
1973 	{ Op1(0), CRn(0b1110),						\
1974 	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
1975 	  access_pmu_evtyper }
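
/*
 * Worked example: PMU_PMEVTYPER(10) expands to
 * { Op1(0), CRn(0b1110), CRm(0b1101), Op2(0b010), access_pmu_evtyper },
 * since (10 >> 3) & 0x3 == 1 selects the CRm bank and 10 & 0x7 == 2
 * selects the Op2 slot.
 */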
1976 
1977 /*
1978  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
1979  * depending on the way they are accessed (as a 32bit or a 64bit
1980  * register).
1981  */
1982 static const struct sys_reg_desc cp15_regs[] = {
1983 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
1984 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
1985 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr },
1986 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr },
1987 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1988 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
1989 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
1990 	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
1991 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
1992 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
1993 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
1994 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
1995 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
1996 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
1997 
1998 	/*
1999 	 * DC{C,I,CI}SW operations:
2000 	 */
2001 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
2002 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
2003 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
2004 
2005 	/* PMU */
2006 	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
2007 	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
2008 	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
2009 	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
2010 	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
2011 	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
2012 	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
2013 	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
2014 	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
2015 	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
2016 	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
2017 	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
2018 	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
2019 	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
2020 	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
2021 
2022 	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
2023 	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
2024 	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
2025 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
2026 
2027 	/* ICC_SRE */
2028 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
2029 
2030 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
2031 
2032 	/* Arch Timers */
2033 	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
2034 	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
2035 
2036 	/* PMEVCNTRn */
2037 	PMU_PMEVCNTR(0),
2038 	PMU_PMEVCNTR(1),
2039 	PMU_PMEVCNTR(2),
2040 	PMU_PMEVCNTR(3),
2041 	PMU_PMEVCNTR(4),
2042 	PMU_PMEVCNTR(5),
2043 	PMU_PMEVCNTR(6),
2044 	PMU_PMEVCNTR(7),
2045 	PMU_PMEVCNTR(8),
2046 	PMU_PMEVCNTR(9),
2047 	PMU_PMEVCNTR(10),
2048 	PMU_PMEVCNTR(11),
2049 	PMU_PMEVCNTR(12),
2050 	PMU_PMEVCNTR(13),
2051 	PMU_PMEVCNTR(14),
2052 	PMU_PMEVCNTR(15),
2053 	PMU_PMEVCNTR(16),
2054 	PMU_PMEVCNTR(17),
2055 	PMU_PMEVCNTR(18),
2056 	PMU_PMEVCNTR(19),
2057 	PMU_PMEVCNTR(20),
2058 	PMU_PMEVCNTR(21),
2059 	PMU_PMEVCNTR(22),
2060 	PMU_PMEVCNTR(23),
2061 	PMU_PMEVCNTR(24),
2062 	PMU_PMEVCNTR(25),
2063 	PMU_PMEVCNTR(26),
2064 	PMU_PMEVCNTR(27),
2065 	PMU_PMEVCNTR(28),
2066 	PMU_PMEVCNTR(29),
2067 	PMU_PMEVCNTR(30),
2068 	/* PMEVTYPERn */
2069 	PMU_PMEVTYPER(0),
2070 	PMU_PMEVTYPER(1),
2071 	PMU_PMEVTYPER(2),
2072 	PMU_PMEVTYPER(3),
2073 	PMU_PMEVTYPER(4),
2074 	PMU_PMEVTYPER(5),
2075 	PMU_PMEVTYPER(6),
2076 	PMU_PMEVTYPER(7),
2077 	PMU_PMEVTYPER(8),
2078 	PMU_PMEVTYPER(9),
2079 	PMU_PMEVTYPER(10),
2080 	PMU_PMEVTYPER(11),
2081 	PMU_PMEVTYPER(12),
2082 	PMU_PMEVTYPER(13),
2083 	PMU_PMEVTYPER(14),
2084 	PMU_PMEVTYPER(15),
2085 	PMU_PMEVTYPER(16),
2086 	PMU_PMEVTYPER(17),
2087 	PMU_PMEVTYPER(18),
2088 	PMU_PMEVTYPER(19),
2089 	PMU_PMEVTYPER(20),
2090 	PMU_PMEVTYPER(21),
2091 	PMU_PMEVTYPER(22),
2092 	PMU_PMEVTYPER(23),
2093 	PMU_PMEVTYPER(24),
2094 	PMU_PMEVTYPER(25),
2095 	PMU_PMEVTYPER(26),
2096 	PMU_PMEVTYPER(27),
2097 	PMU_PMEVTYPER(28),
2098 	PMU_PMEVTYPER(29),
2099 	PMU_PMEVTYPER(30),
2100 	/* PMCCFILTR */
2101 	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
2102 
2103 	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
2104 	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
2105 	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
2106 };
2107 
2108 static const struct sys_reg_desc cp15_64_regs[] = {
2109 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
2110 	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
2111 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
2112 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
2113 	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
2114 	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
2115 	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
2116 };
2117 
2118 static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
2119 			      bool is_32)
2120 {
2121 	unsigned int i;
2122 
2123 	for (i = 0; i < n; i++) {
2124 		if (!is_32 && table[i].reg && !table[i].reset) {
2125 			kvm_err("sys_reg table %p entry %d lacks reset\n",
2126 				table, i);
2127 			return 1;
2128 		}
2129 
2130 		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2131 			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2132 			return 1;
2133 		}
2134 	}
2135 
2136 	return 0;
2137 }
2138 
2139 static int match_sys_reg(const void *key, const void *elt)
2140 {
2141 	const unsigned long pval = (unsigned long)key;
2142 	const struct sys_reg_desc *r = elt;
2143 
2144 	return pval - reg_to_encoding(r);
2145 }
2146 
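/*
 * The tables are kept sorted by encoding (enforced at init time by
 * check_sysreg_table()), which is what makes the bsearch() below valid.
 */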
2147 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
2148 					 const struct sys_reg_desc table[],
2149 					 unsigned int num)
2150 {
2151 	unsigned long pval = reg_to_encoding(params);
2152 
2153 	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
2154 }
2155 
2156 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
2157 {
2158 	kvm_inject_undefined(vcpu);
2159 	return 1;
2160 }
2161 
2162 static void perform_access(struct kvm_vcpu *vcpu,
2163 			   struct sys_reg_params *params,
2164 			   const struct sys_reg_desc *r)
2165 {
2166 	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
2167 
2168 	/* Check for regs disabled by runtime config */
2169 	if (sysreg_hidden(vcpu, r)) {
2170 		kvm_inject_undefined(vcpu);
2171 		return;
2172 	}
2173 
2174 	/*
2175 	 * Not having an accessor means that we have configured a trap
2176 	 * that we don't know how to handle. This certainly qualifies
2177 	 * as a gross bug that should be fixed right away.
2178 	 */
2179 	BUG_ON(!r->access);
2180 
2181 	/* Skip instruction if instructed so */
2182 	if (likely(r->access(vcpu, params, r)))
2183 		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
2184 }
2185 
2186 /*
2187  * emulate_cp -- tries to match a sys_reg access in a handling table,
2188  *               and calls the corresponding trap handler.
2189  *
2190  * @params: pointer to the descriptor of the access
2191  * @table: array of trap descriptors
2192  * @num: size of the trap descriptor array
2193  *
2194  * Return 0 if the access has been handled, and -1 if not.
2195  */
2196 static int emulate_cp(struct kvm_vcpu *vcpu,
2197 		      struct sys_reg_params *params,
2198 		      const struct sys_reg_desc *table,
2199 		      size_t num)
2200 {
2201 	const struct sys_reg_desc *r;
2202 
2203 	if (!table)
2204 		return -1;	/* Not handled */
2205 
2206 	r = find_reg(params, table, num);
2207 
2208 	if (r) {
2209 		perform_access(vcpu, params, r);
2210 		return 0;
2211 	}
2212 
2213 	/* Not handled */
2214 	return -1;
2215 }
2216 
2217 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
2218 				struct sys_reg_params *params)
2219 {
2220 	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
2221 	int cp = -1;
2222 
2223 	switch (esr_ec) {
2224 	case ESR_ELx_EC_CP15_32:
2225 	case ESR_ELx_EC_CP15_64:
2226 		cp = 15;
2227 		break;
2228 	case ESR_ELx_EC_CP14_MR:
2229 	case ESR_ELx_EC_CP14_64:
2230 		cp = 14;
2231 		break;
2232 	default:
2233 		WARN_ON(1);
2234 	}
2235 
2236 	print_sys_reg_msg(params,
2237 			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
2238 			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2239 	kvm_inject_undefined(vcpu);
2240 }
2241 
2242 /**
2243  * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
2244  * @vcpu: The VCPU pointer
2245  * @global: The table of register descriptors to match against
 * @nr_global: The number of entries in @global
2246  */
2247 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
2248 			    const struct sys_reg_desc *global,
2249 			    size_t nr_global)
2250 {
2251 	struct sys_reg_params params;
2252 	u32 esr = kvm_vcpu_get_esr(vcpu);
2253 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
2254 	int Rt2 = (esr >> 10) & 0x1f;
2255 
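	/*
	 * Decode the MCRR/MRRC ISS: Opc1 sits at [19:16], Rt2 at [14:10]
	 * (extracted above), CRm at [4:1], and bit 0 is clear for a
	 * guest write (MCRR).
	 */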
2256 	params.is_aarch32 = true;
2257 	params.is_32bit = false;
2258 	params.CRm = (esr >> 1) & 0xf;
2259 	params.is_write = ((esr & 1) == 0);
2260 
2261 	params.Op0 = 0;
2262 	params.Op1 = (esr >> 16) & 0xf;
2263 	params.Op2 = 0;
2264 	params.CRn = 0;
2265 
2266 	/*
2267 	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
2268 	 * backends between AArch32 and AArch64, we get away with it.
2269 	 */
2270 	if (params.is_write) {
2271 		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
2272 		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
2273 	}
2274 
2275 	/*
2276 	 * If the table contains a handler, let it handle the access; on
2277 	 * the read side, split the resulting 64bit value back across Rt
2278 	 * and Rt2 before returning success.
2279 	 */
2280 	if (!emulate_cp(vcpu, &params, global, nr_global)) {
2281 		/* Split up the value between registers for the read side */
2282 		if (!params.is_write) {
2283 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
2284 			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
2285 		}
2286 
2287 		return 1;
2288 	}
2289 
2290 	unhandled_cp_access(vcpu, &params);
2291 	return 1;
2292 }
2293 
2294 /**
2295  * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
2296  * @vcpu: The VCPU pointer
2297  * @global: The table of register descriptors to match against
 * @nr_global: The number of entries in @global
2298  */
2299 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
2300 			    const struct sys_reg_desc *global,
2301 			    size_t nr_global)
2302 {
2303 	struct sys_reg_params params;
2304 	u32 esr = kvm_vcpu_get_esr(vcpu);
2305 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
2306 
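	/*
	 * Decode the MCR/MRC ISS: Opc2 sits at [19:17], Opc1 at [16:14],
	 * CRn at [13:10], CRm at [4:1], and bit 0 is clear for a guest
	 * write (MCR).
	 */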
2307 	params.is_aarch32 = true;
2308 	params.is_32bit = true;
2309 	params.CRm = (esr >> 1) & 0xf;
2310 	params.regval = vcpu_get_reg(vcpu, Rt);
2311 	params.is_write = ((esr & 1) == 0);
2312 	params.CRn = (esr >> 10) & 0xf;
2313 	params.Op0 = 0;
2314 	params.Op1 = (esr >> 14) & 0x7;
2315 	params.Op2 = (esr >> 17) & 0x7;
2316 
2317 	if (!emulate_cp(vcpu, &params, global, nr_global)) {
2318 		if (!params.is_write)
2319 			vcpu_set_reg(vcpu, Rt, params.regval);
2320 		return 1;
2321 	}
2322 
2323 	unhandled_cp_access(vcpu, &params);
2324 	return 1;
2325 }
2326 
2327 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
2328 {
2329 	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
2330 }
2331 
2332 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
2333 {
2334 	return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
2335 }
2336 
2337 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
2338 {
2339 	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
2340 }
2341 
2342 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
2343 {
2344 	return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs));
2345 }
2346 
2347 static bool is_imp_def_sys_reg(struct sys_reg_params *params)
2348 {
2349 	// See ARM DDI 0487E.a, section D12.3.2
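	// CRn & 0b1011 == 0b1011 matches exactly CRn == 0b1011 and
	// CRn == 0b1111, i.e. the implementation-defined encoding space.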
2350 	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
2351 }
2352 
2353 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
2354 			   struct sys_reg_params *params)
2355 {
2356 	const struct sys_reg_desc *r;
2357 
2358 	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2359 
2360 	if (likely(r)) {
2361 		perform_access(vcpu, params, r);
2362 	} else if (is_imp_def_sys_reg(params)) {
2363 		kvm_inject_undefined(vcpu);
2364 	} else {
2365 		print_sys_reg_msg(params,
2366 				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
2367 				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2368 		kvm_inject_undefined(vcpu);
2369 	}
2370 	return 1;
2371 }
2372 
2373 /**
2374  * kvm_reset_sys_regs - sets system registers to reset value
2375  * @vcpu: The VCPU pointer
2376  *
2377  * This function walks the sys_reg_descs table above and sets each
2378  * register on the virtual CPU struct to its architecturally defined reset value.
2379  */
2380 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2381 {
2382 	unsigned long i;
2383 
2384 	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
2385 		if (sys_reg_descs[i].reset)
2386 			sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
2387 }
2388 
2389 /**
2390  * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
2391  * @vcpu: The VCPU pointer
2392  */
2393 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
2394 {
2395 	struct sys_reg_params params;
2396 	unsigned long esr = kvm_vcpu_get_esr(vcpu);
2397 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
2398 	int ret;
2399 
2400 	trace_kvm_handle_sys_reg(esr);
2401 
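	/*
	 * Decode the MRS/MSR ISS: Op0 sits at [21:20], Op2 at [19:17],
	 * Op1 at [16:14], CRn at [13:10], CRm at [4:1], and bit 0 is
	 * clear for a guest write (MSR).
	 */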
2402 	params.is_aarch32 = false;
2403 	params.is_32bit = false;
2404 	params.Op0 = (esr >> 20) & 3;
2405 	params.Op1 = (esr >> 14) & 0x7;
2406 	params.CRn = (esr >> 10) & 0xf;
2407 	params.CRm = (esr >> 1) & 0xf;
2408 	params.Op2 = (esr >> 17) & 0x7;
2409 	params.regval = vcpu_get_reg(vcpu, Rt);
2410 	params.is_write = !(esr & 1);
2411 
2412 	ret = emulate_sys_reg(vcpu, &params);
2413 
2414 	if (!params.is_write)
2415 		vcpu_set_reg(vcpu, Rt, params.regval);
2416 	return ret;
2417 }
2418 
2419 /******************************************************************************
2420  * Userspace API
2421  *****************************************************************************/
2422 
2423 static bool index_to_params(u64 id, struct sys_reg_params *params)
2424 {
2425 	switch (id & KVM_REG_SIZE_MASK) {
2426 	case KVM_REG_SIZE_U64:
2427 		/* Any unused index bits mean it's not valid. */
2428 		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
2429 			      | KVM_REG_ARM_COPROC_MASK
2430 			      | KVM_REG_ARM64_SYSREG_OP0_MASK
2431 			      | KVM_REG_ARM64_SYSREG_OP1_MASK
2432 			      | KVM_REG_ARM64_SYSREG_CRN_MASK
2433 			      | KVM_REG_ARM64_SYSREG_CRM_MASK
2434 			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
2435 			return false;
2436 		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
2437 			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
2438 		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
2439 			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
2440 		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
2441 			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
2442 		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
2443 			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
2444 		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
2445 			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
2446 		return true;
2447 	default:
2448 		return false;
2449 	}
2450 }
2451 
2452 const struct sys_reg_desc *find_reg_by_id(u64 id,
2453 					  struct sys_reg_params *params,
2454 					  const struct sys_reg_desc table[],
2455 					  unsigned int num)
2456 {
2457 	if (!index_to_params(id, params))
2458 		return NULL;
2459 
2460 	return find_reg(params, table, num);
2461 }
2462 
2463 /* Decode an index value, and find the sys_reg_desc entry. */
2464 static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
2465 						    u64 id)
2466 {
2467 	const struct sys_reg_desc *r;
2468 	struct sys_reg_params params;
2469 
2470 	/* We only do sys_reg for now. */
2471 	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
2472 		return NULL;
2473 
2474 	if (!index_to_params(id, &params))
2475 		return NULL;
2476 
2477 	r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2478 
2479 	/* Not saved in the sys_reg array and not otherwise accessible? */
2480 	if (r && !(r->reg || r->get_user))
2481 		r = NULL;
2482 
2483 	return r;
2484 }
2485 
2486 /*
2487  * These are the invariant sys_reg registers: we let the guest see the
2488  * host versions of these, so they're part of the guest state.
2489  *
2490  * A future CPU may provide a mechanism to present different values to
2491  * the guest, or a future kvm may trap them.
2492  */
2493 
2494 #define FUNCTION_INVARIANT(reg)						\
2495 	static void get_##reg(struct kvm_vcpu *v,			\
2496 			      const struct sys_reg_desc *r)		\
2497 	{								\
2498 		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
2499 	}
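
/*
 * E.g. FUNCTION_INVARIANT(midr_el1) defines get_midr_el1(), which
 * snapshots the host's MIDR_EL1 into the descriptor's ->val.
 */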
2500 
2501 FUNCTION_INVARIANT(midr_el1)
2502 FUNCTION_INVARIANT(revidr_el1)
2503 FUNCTION_INVARIANT(clidr_el1)
2504 FUNCTION_INVARIANT(aidr_el1)
2505 
2506 static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
2507 {
2508 	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
2509 }
2510 
2511 /* ->val is filled in by kvm_sys_reg_table_init() */
2512 static struct sys_reg_desc invariant_sys_regs[] = {
2513 	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
2514 	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
2515 	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
2516 	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
2517 	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
2518 };
2519 
2520 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
2521 {
2522 	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
2523 		return -EFAULT;
2524 	return 0;
2525 }
2526 
2527 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
2528 {
2529 	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
2530 		return -EFAULT;
2531 	return 0;
2532 }
2533 
2534 static int get_invariant_sys_reg(u64 id, void __user *uaddr)
2535 {
2536 	struct sys_reg_params params;
2537 	const struct sys_reg_desc *r;
2538 
2539 	r = find_reg_by_id(id, &params, invariant_sys_regs,
2540 			   ARRAY_SIZE(invariant_sys_regs));
2541 	if (!r)
2542 		return -ENOENT;
2543 
2544 	return reg_to_user(uaddr, &r->val, id);
2545 }
2546 
2547 static int set_invariant_sys_reg(u64 id, void __user *uaddr)
2548 {
2549 	struct sys_reg_params params;
2550 	const struct sys_reg_desc *r;
2551 	int err;
2552 	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
2553 
2554 	r = find_reg_by_id(id, &params, invariant_sys_regs,
2555 			   ARRAY_SIZE(invariant_sys_regs));
2556 	if (!r)
2557 		return -ENOENT;
2558 
2559 	err = reg_from_user(&val, uaddr, id);
2560 	if (err)
2561 		return err;
2562 
2563 	/* This is what we mean by invariant: you can't change it. */
2564 	if (r->val != val)
2565 		return -EINVAL;
2566 
2567 	return 0;
2568 }
2569 
2570 static bool is_valid_cache(u32 val)
2571 {
2572 	u32 level, ctype;
2573 
2574 	if (val >= CSSELR_MAX)
2575 		return false;
2576 
2577 	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
2578 	level = (val >> 1);
2579 	ctype = (cache_levels >> (level * 3)) & 7;
2580 
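	/*
	 * E.g. val == 0 asks about the L1 data side: level 0 selects
	 * Ctype1, and the access is valid if that field reports a data
	 * (2), separate (3) or unified (4) cache.
	 */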
2581 	switch (ctype) {
2582 	case 0: /* No cache */
2583 		return false;
2584 	case 1: /* Instruction cache only */
2585 		return (val & 1);
2586 	case 2: /* Data cache only */
2587 	case 4: /* Unified cache */
2588 		return !(val & 1);
2589 	case 3: /* Separate instruction and data caches */
2590 		return true;
2591 	default: /* Reserved: we can't know instruction or data. */
2592 		return false;
2593 	}
2594 }
2595 
2596 static int demux_c15_get(u64 id, void __user *uaddr)
2597 {
2598 	u32 val;
2599 	u32 __user *uval = uaddr;
2600 
2601 	/* Fail if we have unknown bits set. */
2602 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2603 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2604 		return -ENOENT;
2605 
2606 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2607 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2608 		if (KVM_REG_SIZE(id) != 4)
2609 			return -ENOENT;
2610 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2611 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2612 		if (!is_valid_cache(val))
2613 			return -ENOENT;
2614 
2615 		return put_user(get_ccsidr(val), uval);
2616 	default:
2617 		return -ENOENT;
2618 	}
2619 }
2620 
2621 static int demux_c15_set(u64 id, void __user *uaddr)
2622 {
2623 	u32 val, newval;
2624 	u32 __user *uval = uaddr;
2625 
2626 	/* Fail if we have unknown bits set. */
2627 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2628 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2629 		return -ENOENT;
2630 
2631 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2632 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2633 		if (KVM_REG_SIZE(id) != 4)
2634 			return -ENOENT;
2635 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2636 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2637 		if (!is_valid_cache(val))
2638 			return -ENOENT;
2639 
2640 		if (get_user(newval, uval))
2641 			return -EFAULT;
2642 
2643 		/* This is also invariant: you can't change it. */
2644 		if (newval != get_ccsidr(val))
2645 			return -EINVAL;
2646 		return 0;
2647 	default:
2648 		return -ENOENT;
2649 	}
2650 }
2651 
2652 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2653 {
2654 	const struct sys_reg_desc *r;
2655 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2656 
2657 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2658 		return demux_c15_get(reg->id, uaddr);
2659 
2660 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2661 		return -ENOENT;
2662 
2663 	r = index_to_sys_reg_desc(vcpu, reg->id);
2664 	if (!r)
2665 		return get_invariant_sys_reg(reg->id, uaddr);
2666 
2667 	/* Check for regs disabled by runtime config */
2668 	if (sysreg_hidden(vcpu, r))
2669 		return -ENOENT;
2670 
2671 	if (r->get_user)
2672 		return (r->get_user)(vcpu, r, reg, uaddr);
2673 
2674 	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
2675 }
2676 
2677 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2678 {
2679 	const struct sys_reg_desc *r;
2680 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2681 
2682 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2683 		return demux_c15_set(reg->id, uaddr);
2684 
2685 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2686 		return -ENOENT;
2687 
2688 	r = index_to_sys_reg_desc(vcpu, reg->id);
2689 	if (!r)
2690 		return set_invariant_sys_reg(reg->id, uaddr);
2691 
2692 	/* Check for regs disabled by runtime config */
2693 	if (sysreg_hidden(vcpu, r))
2694 		return -ENOENT;
2695 
2696 	if (r->set_user)
2697 		return (r->set_user)(vcpu, r, reg, uaddr);
2698 
2699 	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
2700 }
2701 
2702 static unsigned int num_demux_regs(void)
2703 {
2704 	unsigned int i, count = 0;
2705 
2706 	for (i = 0; i < CSSELR_MAX; i++)
2707 		if (is_valid_cache(i))
2708 			count++;
2709 
2710 	return count;
2711 }
2712 
2713 static int write_demux_regids(u64 __user *uindices)
2714 {
2715 	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2716 	unsigned int i;
2717 
2718 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2719 	for (i = 0; i < CSSELR_MAX; i++) {
2720 		if (!is_valid_cache(i))
2721 			continue;
2722 		if (put_user(val | i, uindices))
2723 			return -EFAULT;
2724 		uindices++;
2725 	}
2726 	return 0;
2727 }
2728 
2729 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2730 {
2731 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2732 		KVM_REG_ARM64_SYSREG |
2733 		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2734 		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2735 		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2736 		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2737 		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2738 }
2739 
2740 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2741 {
2742 	if (!*uind)
2743 		return true;
2744 
2745 	if (put_user(sys_reg_to_index(reg), *uind))
2746 		return false;
2747 
2748 	(*uind)++;
2749 	return true;
2750 }
2751 
2752 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
2753 			    const struct sys_reg_desc *rd,
2754 			    u64 __user **uind,
2755 			    unsigned int *total)
2756 {
2757 	/*
2758 	 * Ignore registers we trap but don't save,
2759 	 * and for which no custom user accessor is provided.
2760 	 */
2761 	if (!(rd->reg || rd->get_user))
2762 		return 0;
2763 
2764 	if (sysreg_hidden(vcpu, rd))
2765 		return 0;
2766 
2767 	if (!copy_reg_to_user(rd, uind))
2768 		return -EFAULT;
2769 
2770 	(*total)++;
2771 	return 0;
2772 }
2773 
2774 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
2775 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
2776 {
2777 	const struct sys_reg_desc *i2, *end2;
2778 	unsigned int total = 0;
2779 	int err;
2780 
2781 	i2 = sys_reg_descs;
2782 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
2783 
2784 	while (i2 != end2) {
2785 		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
2786 		if (err)
2787 			return err;
2788 	}
2789 	return total;
2790 }
2791 
2792 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2793 {
2794 	return ARRAY_SIZE(invariant_sys_regs)
2795 		+ num_demux_regs()
2796 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
2797 }
2798 
2799 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2800 {
2801 	unsigned int i;
2802 	int err;
2803 
2804 	/* Then give them all the invariant registers' indices. */
2805 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2806 		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2807 			return -EFAULT;
2808 		uindices++;
2809 	}
2810 
2811 	err = walk_sys_regs(vcpu, uindices);
2812 	if (err < 0)
2813 		return err;
2814 	uindices += err;
2815 
2816 	return write_demux_regids(uindices);
2817 }
2818 
2819 void kvm_sys_reg_table_init(void)
2820 {
2821 	unsigned int i;
2822 	struct sys_reg_desc clidr;
2823 
2824 	/* Make sure tables are unique and in order. */
2825 	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false));
2826 	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true));
2827 	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true));
2828 	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true));
2829 	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true));
2830 	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false));
2831 
2832 	/* We abuse the reset function to overwrite the table itself. */
2833 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
2834 		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
2835 
2836 	/*
2837 	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
2838 	 *
2839 	 *   If software reads the Cache Type fields from Ctype1
2840 	 *   upwards, once it has seen a value of 0b000, no caches
2841 	 *   exist at further-out levels of the hierarchy. So, for
2842 	 *   example, if Ctype3 is the first Cache Type field with a
2843 	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
2844 	 *   ignored.
2845 	 */
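	/*
	 * Worked example: if CLIDR reports Ctype1 == 0b011 and
	 * Ctype2 == 0b000, the loop below stops at i == 1 and the
	 * mask (1 << 3) - 1 keeps only Ctype1 in cache_levels.
	 */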
2846 	get_clidr_el1(NULL, &clidr); /* Ugly... */
2847 	cache_levels = clidr.val;
2848 	for (i = 0; i < 7; i++)
2849 		if (((cache_levels >> (i*3)) & 7) == 0)
2850 			break;
2851 	/* Clear all higher bits. */
2852 	cache_levels &= (1 << (i*3))-1;
2853 }
2854