xref: /openbmc/linux/arch/arm64/kvm/sys_regs.c (revision d0e22329)
1 /*
2  * Copyright (C) 2012,2013 - ARM Ltd
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * Derived from arch/arm/kvm/coproc.c:
6  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7  * Authors: Rusty Russell <rusty@rustcorp.com.au>
8  *          Christoffer Dall <c.dall@virtualopensystems.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License, version 2, as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
21  */
22 
23 #include <linux/bsearch.h>
24 #include <linux/kvm_host.h>
25 #include <linux/mm.h>
26 #include <linux/printk.h>
27 #include <linux/uaccess.h>
28 
29 #include <asm/cacheflush.h>
30 #include <asm/cputype.h>
31 #include <asm/debug-monitors.h>
32 #include <asm/esr.h>
33 #include <asm/kvm_arm.h>
34 #include <asm/kvm_coproc.h>
35 #include <asm/kvm_emulate.h>
36 #include <asm/kvm_host.h>
37 #include <asm/kvm_hyp.h>
38 #include <asm/kvm_mmu.h>
39 #include <asm/perf_event.h>
40 #include <asm/sysreg.h>
41 
42 #include <trace/events/kvm.h>
43 
44 #include "sys_regs.h"
45 
46 #include "trace.h"
47 
48 /*
49  * All of this file is extremely similar to the ARM coproc.c, but the
50  * types are different. My gut feeling is that it should be pretty
51  * easy to merge, but that would be an ABI breakage -- again. VFP
52  * would also need to be abstracted.
53  *
54  * For AArch32, we only take care of what is being trapped. Anything
55  * that has to do with init and userspace access has to go via the
56  * 64bit interface.
57  */
58 
59 static bool read_from_write_only(struct kvm_vcpu *vcpu,
60 				 struct sys_reg_params *params,
61 				 const struct sys_reg_desc *r)
62 {
63 	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
64 	print_sys_reg_instr(params);
65 	kvm_inject_undefined(vcpu);
66 	return false;
67 }
68 
69 static bool write_to_read_only(struct kvm_vcpu *vcpu,
70 			       struct sys_reg_params *params,
71 			       const struct sys_reg_desc *r)
72 {
73 	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
74 	print_sys_reg_instr(params);
75 	kvm_inject_undefined(vcpu);
76 	return false;
77 }
78 
79 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
80 {
81 	if (!vcpu->arch.sysregs_loaded_on_cpu)
82 		goto immediate_read;
83 
84 	/*
85 	 * System registers listed in the switch are not saved on every
86 	 * exit from the guest but are only saved on vcpu_put.
87 	 *
88 	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
89 	 * should never be listed below, because the guest cannot modify its
90 	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
91 	 * thread when emulating cross-VCPU communication.
92 	 */
93 	switch (reg) {
94 	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
95 	case SCTLR_EL1:		return read_sysreg_s(sctlr_EL12);
96 	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
97 	case CPACR_EL1:		return read_sysreg_s(cpacr_EL12);
98 	case TTBR0_EL1:		return read_sysreg_s(ttbr0_EL12);
99 	case TTBR1_EL1:		return read_sysreg_s(ttbr1_EL12);
100 	case TCR_EL1:		return read_sysreg_s(tcr_EL12);
101 	case ESR_EL1:		return read_sysreg_s(esr_EL12);
102 	case AFSR0_EL1:		return read_sysreg_s(afsr0_EL12);
103 	case AFSR1_EL1:		return read_sysreg_s(afsr1_EL12);
104 	case FAR_EL1:		return read_sysreg_s(far_EL12);
105 	case MAIR_EL1:		return read_sysreg_s(mair_EL12);
106 	case VBAR_EL1:		return read_sysreg_s(vbar_EL12);
107 	case CONTEXTIDR_EL1:	return read_sysreg_s(contextidr_EL12);
108 	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
109 	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
110 	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
111 	case AMAIR_EL1:		return read_sysreg_s(amair_EL12);
112 	case CNTKCTL_EL1:	return read_sysreg_s(cntkctl_EL12);
113 	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
114 	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
115 	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
116 	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
117 	}
118 
119 immediate_read:
120 	return __vcpu_sys_reg(vcpu, reg);
121 }
122 
123 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
124 {
125 	if (!vcpu->arch.sysregs_loaded_on_cpu)
126 		goto immediate_write;
127 
128 	/*
129 	 * System registers listed in the switch are not restored on every
130 	 * entry to the guest but are only restored on vcpu_load.
131 	 *
132 	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
133  * should never be listed below, because the MPIDR should only be
134 	 * set once, before running the VCPU, and never changed later.
135 	 */
136 	switch (reg) {
137 	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
138 	case SCTLR_EL1:		write_sysreg_s(val, sctlr_EL12);	return;
139 	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
140 	case CPACR_EL1:		write_sysreg_s(val, cpacr_EL12);	return;
141 	case TTBR0_EL1:		write_sysreg_s(val, ttbr0_EL12);	return;
142 	case TTBR1_EL1:		write_sysreg_s(val, ttbr1_EL12);	return;
143 	case TCR_EL1:		write_sysreg_s(val, tcr_EL12);		return;
144 	case ESR_EL1:		write_sysreg_s(val, esr_EL12);		return;
145 	case AFSR0_EL1:		write_sysreg_s(val, afsr0_EL12);	return;
146 	case AFSR1_EL1:		write_sysreg_s(val, afsr1_EL12);	return;
147 	case FAR_EL1:		write_sysreg_s(val, far_EL12);		return;
148 	case MAIR_EL1:		write_sysreg_s(val, mair_EL12);		return;
149 	case VBAR_EL1:		write_sysreg_s(val, vbar_EL12);		return;
150 	case CONTEXTIDR_EL1:	write_sysreg_s(val, contextidr_EL12);	return;
151 	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
152 	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
153 	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
154 	case AMAIR_EL1:		write_sysreg_s(val, amair_EL12);	return;
155 	case CNTKCTL_EL1:	write_sysreg_s(val, cntkctl_EL12);	return;
156 	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
157 	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
158 	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
159 	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
160 	}
161 
162 immediate_write:
163 	__vcpu_sys_reg(vcpu, reg) = val;
164 }
165 
166 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
167 static u32 cache_levels;
168 
169 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
170 #define CSSELR_MAX 12
171 
172 /* Which cache CCSIDR represents depends on CSSELR value. */
173 static u32 get_ccsidr(u32 csselr)
174 {
175 	u32 ccsidr;
176 
177 	/* Make sure no one else changes CSSELR during this! */
178 	local_irq_disable();
179 	write_sysreg(csselr, csselr_el1);
180 	isb();
181 	ccsidr = read_sysreg(ccsidr_el1);
182 	local_irq_enable();
183 
184 	return ccsidr;
185 }
186 
187 /*
188  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
189  */
190 static bool access_dcsw(struct kvm_vcpu *vcpu,
191 			struct sys_reg_params *p,
192 			const struct sys_reg_desc *r)
193 {
194 	if (!p->is_write)
195 		return read_from_write_only(vcpu, p, r);
196 
197 	/*
198 	 * Only track S/W ops if we don't have FWB. It still indicates
199 	 * that the guest is a bit broken (S/W operations should only
200 	 * be done by firmware, knowing that there is only a single
201 	 * CPU left in the system, and certainly not from non-secure
202 	 * software).
203 	 */
204 	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
205 		kvm_set_way_flush(vcpu);
206 
207 	return true;
208 }
209 
210 /*
211  * Generic accessor for VM registers. Only called as long as HCR_TVM
212  * is set. If the guest enables the MMU, we stop trapping the VM
213  * sys_regs and leave it in complete control of the caches.
214  */
215 static bool access_vm_reg(struct kvm_vcpu *vcpu,
216 			  struct sys_reg_params *p,
217 			  const struct sys_reg_desc *r)
218 {
219 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
220 	u64 val;
221 	int reg = r->reg;
222 
223 	BUG_ON(!p->is_write);
224 
225 	/* See the 32bit mapping in kvm_host.h */
226 	if (p->is_aarch32)
227 		reg = r->reg / 2;
228 
229 	if (!p->is_aarch32 || !p->is_32bit) {
230 		val = p->regval;
231 	} else {
232 		val = vcpu_read_sys_reg(vcpu, reg);
233 		if (r->reg % 2)
234 			val = (p->regval << 32) | (u64)lower_32_bits(val);
235 		else
236 			val = ((u64)upper_32_bits(val) << 32) |
237 				lower_32_bits(p->regval);
238 	}
239 	vcpu_write_sys_reg(vcpu, val, reg);
240 
241 	kvm_toggle_cache(vcpu, was_enabled);
242 	return true;
243 }
244 
245 /*
246  * Trap handler for the GICv3 SGI generation system register.
247  * Forward the request to the VGIC emulation.
248  * The cp15_64 code makes sure this automatically works
249  * for both AArch64 and AArch32 accesses.
250  */
251 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
252 			   struct sys_reg_params *p,
253 			   const struct sys_reg_desc *r)
254 {
255 	bool g1;
256 
257 	if (!p->is_write)
258 		return read_from_write_only(vcpu, p, r);
259 
260 	/*
261 	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
262 	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
263 	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
264 	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
265 	 * group.
266 	 */
267 	if (p->is_aarch32) {
268 		switch (p->Op1) {
269 		default:		/* Keep GCC quiet */
270 		case 0:			/* ICC_SGI1R */
271 			g1 = true;
272 			break;
273 		case 1:			/* ICC_ASGI1R */
274 		case 2:			/* ICC_SGI0R */
275 			g1 = false;
276 			break;
277 		}
278 	} else {
279 		switch (p->Op2) {
280 		default:		/* Keep GCC quiet */
281 		case 5:			/* ICC_SGI1R_EL1 */
282 			g1 = true;
283 			break;
284 		case 6:			/* ICC_ASGI1R_EL1 */
285 		case 7:			/* ICC_SGI0R_EL1 */
286 			g1 = false;
287 			break;
288 		}
289 	}
290 
291 	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
292 
293 	return true;
294 }
295 
296 static bool access_gic_sre(struct kvm_vcpu *vcpu,
297 			   struct sys_reg_params *p,
298 			   const struct sys_reg_desc *r)
299 {
300 	if (p->is_write)
301 		return ignore_write(vcpu, p);
302 
303 	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
304 	return true;
305 }
306 
307 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
308 			struct sys_reg_params *p,
309 			const struct sys_reg_desc *r)
310 {
311 	if (p->is_write)
312 		return ignore_write(vcpu, p);
313 	else
314 		return read_zero(vcpu, p);
315 }
316 
317 static bool trap_undef(struct kvm_vcpu *vcpu,
318 		       struct sys_reg_params *p,
319 		       const struct sys_reg_desc *r)
320 {
321 	kvm_inject_undefined(vcpu);
322 	return false;
323 }
324 
325 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
326 			   struct sys_reg_params *p,
327 			   const struct sys_reg_desc *r)
328 {
329 	if (p->is_write) {
330 		return ignore_write(vcpu, p);
331 	} else {
332 		p->regval = (1 << 3);
333 		return true;
334 	}
335 }
336 
337 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
338 				   struct sys_reg_params *p,
339 				   const struct sys_reg_desc *r)
340 {
341 	if (p->is_write) {
342 		return ignore_write(vcpu, p);
343 	} else {
344 		p->regval = read_sysreg(dbgauthstatus_el1);
345 		return true;
346 	}
347 }
348 
349 /*
350  * We want to avoid world-switching all the DBG registers all the
351  * time:
352  *
353  * - If we've touched any debug register, it is likely that we're
354  *   going to touch more of them. It then makes sense to disable the
355  *   traps and start doing the save/restore dance
356  * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
357  *   then mandatory to save/restore the registers, as the guest
358  *   depends on them.
359  *
360  * For this, we use a DIRTY bit, indicating the guest has modified the
361  * debug registers, used as follows:
362  *
363  * On guest entry:
364  * - If the dirty bit is set (because we're coming back from trapping),
365  *   disable the traps, save host registers, restore guest registers.
366  * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
367  *   set the dirty bit, disable the traps, save host registers,
368  *   restore guest registers.
369  * - Otherwise, enable the traps
370  *
371  * On guest exit:
372  * - If the dirty bit is set, save guest registers, restore host
373  *   registers and clear the dirty bit. This ensures that the host can
374  *   now use the debug registers.
375  */
376 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
377 			    struct sys_reg_params *p,
378 			    const struct sys_reg_desc *r)
379 {
380 	if (p->is_write) {
381 		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
382 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
383 	} else {
384 		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
385 	}
386 
387 	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
388 
389 	return true;
390 }
391 
392 /*
393  * reg_to_dbg/dbg_to_reg
394  *
395  * A 32-bit write to a debug register leaves the top bits alone.
396  * A 32-bit read from a debug register only returns the bottom bits.
397  *
398  * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
399  * hyp.S code switches between host and guest values in the future.
400  */
401 static void reg_to_dbg(struct kvm_vcpu *vcpu,
402 		       struct sys_reg_params *p,
403 		       u64 *dbg_reg)
404 {
405 	u64 val = p->regval;
406 
407 	if (p->is_32bit) {
408 		val &= 0xffffffffUL;
409 		val |= ((*dbg_reg >> 32) << 32);
410 	}
411 
412 	*dbg_reg = val;
413 	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
414 }
415 
416 static void dbg_to_reg(struct kvm_vcpu *vcpu,
417 		       struct sys_reg_params *p,
418 		       u64 *dbg_reg)
419 {
420 	p->regval = *dbg_reg;
421 	if (p->is_32bit)
422 		p->regval &= 0xffffffffUL;
423 }
424 
425 static bool trap_bvr(struct kvm_vcpu *vcpu,
426 		     struct sys_reg_params *p,
427 		     const struct sys_reg_desc *rd)
428 {
429 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
430 
431 	if (p->is_write)
432 		reg_to_dbg(vcpu, p, dbg_reg);
433 	else
434 		dbg_to_reg(vcpu, p, dbg_reg);
435 
436 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
437 
438 	return true;
439 }
440 
441 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
442 		const struct kvm_one_reg *reg, void __user *uaddr)
443 {
444 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
445 
446 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
447 		return -EFAULT;
448 	return 0;
449 }
450 
451 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
452 	const struct kvm_one_reg *reg, void __user *uaddr)
453 {
454 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
455 
456 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
457 		return -EFAULT;
458 	return 0;
459 }
460 
461 static void reset_bvr(struct kvm_vcpu *vcpu,
462 		      const struct sys_reg_desc *rd)
463 {
464 	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
465 }
466 
467 static bool trap_bcr(struct kvm_vcpu *vcpu,
468 		     struct sys_reg_params *p,
469 		     const struct sys_reg_desc *rd)
470 {
471 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
472 
473 	if (p->is_write)
474 		reg_to_dbg(vcpu, p, dbg_reg);
475 	else
476 		dbg_to_reg(vcpu, p, dbg_reg);
477 
478 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
479 
480 	return true;
481 }
482 
483 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
484 		const struct kvm_one_reg *reg, void __user *uaddr)
485 {
486 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
487 
488 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
489 		return -EFAULT;
490 
491 	return 0;
492 }
493 
494 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
495 	const struct kvm_one_reg *reg, void __user *uaddr)
496 {
497 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
498 
499 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
500 		return -EFAULT;
501 	return 0;
502 }
503 
504 static void reset_bcr(struct kvm_vcpu *vcpu,
505 		      const struct sys_reg_desc *rd)
506 {
507 	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
508 }
509 
510 static bool trap_wvr(struct kvm_vcpu *vcpu,
511 		     struct sys_reg_params *p,
512 		     const struct sys_reg_desc *rd)
513 {
514 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
515 
516 	if (p->is_write)
517 		reg_to_dbg(vcpu, p, dbg_reg);
518 	else
519 		dbg_to_reg(vcpu, p, dbg_reg);
520 
521 	trace_trap_reg(__func__, rd->reg, p->is_write,
522 		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
523 
524 	return true;
525 }
526 
527 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
528 		const struct kvm_one_reg *reg, void __user *uaddr)
529 {
530 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
531 
532 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
533 		return -EFAULT;
534 	return 0;
535 }
536 
537 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
538 	const struct kvm_one_reg *reg, void __user *uaddr)
539 {
540 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
541 
542 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
543 		return -EFAULT;
544 	return 0;
545 }
546 
547 static void reset_wvr(struct kvm_vcpu *vcpu,
548 		      const struct sys_reg_desc *rd)
549 {
550 	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
551 }
552 
553 static bool trap_wcr(struct kvm_vcpu *vcpu,
554 		     struct sys_reg_params *p,
555 		     const struct sys_reg_desc *rd)
556 {
557 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
558 
559 	if (p->is_write)
560 		reg_to_dbg(vcpu, p, dbg_reg);
561 	else
562 		dbg_to_reg(vcpu, p, dbg_reg);
563 
564 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
565 
566 	return true;
567 }
568 
569 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
570 		const struct kvm_one_reg *reg, void __user *uaddr)
571 {
572 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
573 
574 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
575 		return -EFAULT;
576 	return 0;
577 }
578 
579 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
580 	const struct kvm_one_reg *reg, void __user *uaddr)
581 {
582 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
583 
584 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
585 		return -EFAULT;
586 	return 0;
587 }
588 
589 static void reset_wcr(struct kvm_vcpu *vcpu,
590 		      const struct sys_reg_desc *rd)
591 {
592 	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
593 }
594 
595 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
596 {
597 	u64 amair = read_sysreg(amair_el1);
598 	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
599 }
600 
601 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
602 {
603 	u64 mpidr;
604 
605 	/*
606 	 * Map the vcpu_id into the first three affinity level fields of
607 	 * the MPIDR. We limit the number of VCPUs in level 0 to 16,
608 	 * because the ICC_SGIxR registers of the GICv3 can only address
609 	 * 16 CPUs directly at that level when sending IPIs; the rest of
610 	 * the vcpu_id goes into affinity levels 1 and 2.
611 	 */
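	/* e.g. vcpu_id 20 (0x14) maps to Aff0 = 4, Aff1 = 1, Aff2 = 0 */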
612 	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
613 	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
614 	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
615 	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
616 }
617 
618 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
619 {
620 	u64 pmcr, val;
621 
622 	pmcr = read_sysreg(pmcr_el0);
623 	/*
624 	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN,
625 	 * except for PMCR.E, which resets to zero.
626 	 */
627 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
628 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
629 	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
630 }
631 
632 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
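/*
 * A guest PMU register access is allowed if the vcpu is in a privileged
 * mode, or if any of the given PMUSERENR_EL0 enable bits are set.
 * Otherwise inject an UNDEF into the guest and report the access as
 * disabled by returning true.
 */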
633 {
634 	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
635 	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
636 
637 	if (!enabled)
638 		kvm_inject_undefined(vcpu);
639 
640 	return !enabled;
641 }
642 
643 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
644 {
645 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
646 }
647 
648 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
649 {
650 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
651 }
652 
653 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
654 {
655 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
656 }
657 
658 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
659 {
660 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
661 }
662 
663 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
664 			const struct sys_reg_desc *r)
665 {
666 	u64 val;
667 
668 	if (!kvm_arm_pmu_v3_ready(vcpu))
669 		return trap_raz_wi(vcpu, p, r);
670 
671 	if (pmu_access_el0_disabled(vcpu))
672 		return false;
673 
674 	if (p->is_write) {
675 		/* Only update writeable bits of PMCR */
676 		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
677 		val &= ~ARMV8_PMU_PMCR_MASK;
678 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
679 		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
680 		kvm_pmu_handle_pmcr(vcpu, val);
681 	} else {
682 		/* PMCR.P & PMCR.C are RAZ */
683 		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
684 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
685 		p->regval = val;
686 	}
687 
688 	return true;
689 }
690 
691 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
692 			  const struct sys_reg_desc *r)
693 {
694 	if (!kvm_arm_pmu_v3_ready(vcpu))
695 		return trap_raz_wi(vcpu, p, r);
696 
697 	if (pmu_access_event_counter_el0_disabled(vcpu))
698 		return false;
699 
700 	if (p->is_write)
701 		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
702 	else
703 		/* return PMSELR.SEL field */
704 		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
705 			    & ARMV8_PMU_COUNTER_MASK;
706 
707 	return true;
708 }
709 
710 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
711 			  const struct sys_reg_desc *r)
712 {
713 	u64 pmceid;
714 
715 	if (!kvm_arm_pmu_v3_ready(vcpu))
716 		return trap_raz_wi(vcpu, p, r);
717 
718 	BUG_ON(p->is_write);
719 
720 	if (pmu_access_el0_disabled(vcpu))
721 		return false;
722 
723 	if (!(p->Op2 & 1))
724 		pmceid = read_sysreg(pmceid0_el0);
725 	else
726 		pmceid = read_sysreg(pmceid1_el0);
727 
728 	p->regval = pmceid;
729 
730 	return true;
731 }
732 
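/*
 * PMCR_EL0.N holds the number of event counters implemented for the guest;
 * the cycle counter always exists and has its own fixed index.
 */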
733 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
734 {
735 	u64 pmcr, val;
736 
737 	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
738 	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
739 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
740 		kvm_inject_undefined(vcpu);
741 		return false;
742 	}
743 
744 	return true;
745 }
746 
747 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
748 			      struct sys_reg_params *p,
749 			      const struct sys_reg_desc *r)
750 {
751 	u64 idx;
752 
753 	if (!kvm_arm_pmu_v3_ready(vcpu))
754 		return trap_raz_wi(vcpu, p, r);
755 
756 	if (r->CRn == 9 && r->CRm == 13) {
757 		if (r->Op2 == 2) {
758 			/* PMXEVCNTR_EL0 */
759 			if (pmu_access_event_counter_el0_disabled(vcpu))
760 				return false;
761 
762 			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
763 			      & ARMV8_PMU_COUNTER_MASK;
764 		} else if (r->Op2 == 0) {
765 			/* PMCCNTR_EL0 */
766 			if (pmu_access_cycle_counter_el0_disabled(vcpu))
767 				return false;
768 
769 			idx = ARMV8_PMU_CYCLE_IDX;
770 		} else {
771 			return false;
772 		}
773 	} else if (r->CRn == 0 && r->CRm == 9) {
774 		/* PMCCNTR */
775 		if (pmu_access_event_counter_el0_disabled(vcpu))
776 			return false;
777 
778 		idx = ARMV8_PMU_CYCLE_IDX;
779 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
780 		/* PMEVCNTRn_EL0 */
781 		if (pmu_access_event_counter_el0_disabled(vcpu))
782 			return false;
783 
784 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
785 	} else {
786 		return false;
787 	}
788 
789 	if (!pmu_counter_idx_valid(vcpu, idx))
790 		return false;
791 
792 	if (p->is_write) {
793 		if (pmu_access_el0_disabled(vcpu))
794 			return false;
795 
796 		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
797 	} else {
798 		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
799 	}
800 
801 	return true;
802 }
803 
804 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
805 			       const struct sys_reg_desc *r)
806 {
807 	u64 idx, reg;
808 
809 	if (!kvm_arm_pmu_v3_ready(vcpu))
810 		return trap_raz_wi(vcpu, p, r);
811 
812 	if (pmu_access_el0_disabled(vcpu))
813 		return false;
814 
815 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
816 		/* PMXEVTYPER_EL0 */
817 		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
818 		reg = PMEVTYPER0_EL0 + idx;
819 	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
820 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
821 		if (idx == ARMV8_PMU_CYCLE_IDX)
822 			reg = PMCCFILTR_EL0;
823 		else
824 			/* PMEVTYPERn_EL0 */
825 			reg = PMEVTYPER0_EL0 + idx;
826 	} else {
827 		BUG();
828 	}
829 
830 	if (!pmu_counter_idx_valid(vcpu, idx))
831 		return false;
832 
833 	if (p->is_write) {
834 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
835 		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
836 	} else {
837 		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
838 	}
839 
840 	return true;
841 }
842 
843 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
844 			   const struct sys_reg_desc *r)
845 {
846 	u64 val, mask;
847 
848 	if (!kvm_arm_pmu_v3_ready(vcpu))
849 		return trap_raz_wi(vcpu, p, r);
850 
851 	if (pmu_access_el0_disabled(vcpu))
852 		return false;
853 
854 	mask = kvm_pmu_valid_counter_mask(vcpu);
855 	if (p->is_write) {
856 		val = p->regval & mask;
857 		if (r->Op2 & 0x1) {
858 			/* accessing PMCNTENSET_EL0 */
859 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
860 			kvm_pmu_enable_counter(vcpu, val);
861 		} else {
862 			/* accessing PMCNTENCLR_EL0 */
863 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
864 			kvm_pmu_disable_counter(vcpu, val);
865 		}
866 	} else {
867 		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
868 	}
869 
870 	return true;
871 }
872 
873 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
874 			   const struct sys_reg_desc *r)
875 {
876 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
877 
878 	if (!kvm_arm_pmu_v3_ready(vcpu))
879 		return trap_raz_wi(vcpu, p, r);
880 
881 	if (!vcpu_mode_priv(vcpu)) {
882 		kvm_inject_undefined(vcpu);
883 		return false;
884 	}
885 
886 	if (p->is_write) {
887 		u64 val = p->regval & mask;
888 
889 		if (r->Op2 & 0x1)
890 			/* accessing PMINTENSET_EL1 */
891 			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
892 		else
893 			/* accessing PMINTENCLR_EL1 */
894 			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
895 	} else {
896 		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
897 	}
898 
899 	return true;
900 }
901 
902 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
903 			 const struct sys_reg_desc *r)
904 {
905 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
906 
907 	if (!kvm_arm_pmu_v3_ready(vcpu))
908 		return trap_raz_wi(vcpu, p, r);
909 
910 	if (pmu_access_el0_disabled(vcpu))
911 		return false;
912 
913 	if (p->is_write) {
914 		if (r->CRm & 0x2)
915 			/* accessing PMOVSSET_EL0 */
916 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
917 		else
918 			/* accessing PMOVSCLR_EL0 */
919 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
920 	} else {
921 		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
922 	}
923 
924 	return true;
925 }
926 
927 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
928 			   const struct sys_reg_desc *r)
929 {
930 	u64 mask;
931 
932 	if (!kvm_arm_pmu_v3_ready(vcpu))
933 		return trap_raz_wi(vcpu, p, r);
934 
935 	if (!p->is_write)
936 		return read_from_write_only(vcpu, p, r);
937 
938 	if (pmu_write_swinc_el0_disabled(vcpu))
939 		return false;
940 
941 	mask = kvm_pmu_valid_counter_mask(vcpu);
942 	kvm_pmu_software_increment(vcpu, p->regval & mask);
943 	return true;
944 }
945 
946 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
947 			     const struct sys_reg_desc *r)
948 {
949 	if (!kvm_arm_pmu_v3_ready(vcpu))
950 		return trap_raz_wi(vcpu, p, r);
951 
952 	if (p->is_write) {
953 		if (!vcpu_mode_priv(vcpu)) {
954 			kvm_inject_undefined(vcpu);
955 			return false;
956 		}
957 
958 		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
959 			       p->regval & ARMV8_PMU_USERENR_MASK;
960 	} else {
961 		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
962 			    & ARMV8_PMU_USERENR_MASK;
963 	}
964 
965 	return true;
966 }
967 
968 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
969 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
970 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
971 	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
972 	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
973 	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
974 	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
975 	  trap_wvr, reset_wvr, n, 0,  get_wvr, set_wvr },		\
976 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
977 	  trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
978 
979 /* Macro to expand the PMEVCNTRn_EL0 register */
980 #define PMU_PMEVCNTR_EL0(n)						\
981 	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),					\
982 	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
983 
984 /* Macro to expand the PMEVTYPERn_EL0 register */
985 #define PMU_PMEVTYPER_EL0(n)						\
986 	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\
987 	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
988 
989 static bool access_cntp_tval(struct kvm_vcpu *vcpu,
990 		struct sys_reg_params *p,
991 		const struct sys_reg_desc *r)
992 {
993 	u64 now = kvm_phys_timer_read();
994 	u64 cval;
995 
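	/*
	 * CNTP_TVAL_EL0 is the timer value relative to the current counter;
	 * convert to and from the absolute compare value (CNTP_CVAL_EL0)
	 * tracked by the timer code.
	 */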
996 	if (p->is_write) {
997 		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
998 				      p->regval + now);
999 	} else {
1000 		cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
1001 		p->regval = cval - now;
1002 	}
1003 
1004 	return true;
1005 }
1006 
1007 static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
1008 		struct sys_reg_params *p,
1009 		const struct sys_reg_desc *r)
1010 {
1011 	if (p->is_write)
1012 		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
1013 	else
1014 		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
1015 
1016 	return true;
1017 }
1018 
1019 static bool access_cntp_cval(struct kvm_vcpu *vcpu,
1020 		struct sys_reg_params *p,
1021 		const struct sys_reg_desc *r)
1022 {
1023 	if (p->is_write)
1024 		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
1025 	else
1026 		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
1027 
1028 	return true;
1029 }
1030 
1031 /* Read a sanitised cpufeature ID register by sys_reg_desc */
1032 static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
1033 {
1034 	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
1035 			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
1036 	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
1037 
1038 	if (id == SYS_ID_AA64PFR0_EL1) {
1039 		if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
1040 			kvm_debug("SVE unsupported for guests, suppressing\n");
1041 
1042 		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
1043 	} else if (id == SYS_ID_AA64ISAR1_EL1) {
1044 		const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) |
1045 					 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
1046 					 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
1047 					 (0xfUL << ID_AA64ISAR1_GPI_SHIFT);
1048 		if (val & ptrauth_mask)
1049 			kvm_debug("ptrauth unsupported for guests, suppressing\n");
1050 		val &= ~ptrauth_mask;
1051 	} else if (id == SYS_ID_AA64MMFR1_EL1) {
1052 		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
1053 			kvm_debug("LORegions unsupported for guests, suppressing\n");
1054 
1055 		val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
1056 	}
1057 
1058 	return val;
1059 }
1060 
1061 /* cpufeature ID register access trap handlers */
1062 
1063 static bool __access_id_reg(struct kvm_vcpu *vcpu,
1064 			    struct sys_reg_params *p,
1065 			    const struct sys_reg_desc *r,
1066 			    bool raz)
1067 {
1068 	if (p->is_write)
1069 		return write_to_read_only(vcpu, p, r);
1070 
1071 	p->regval = read_id_reg(r, raz);
1072 	return true;
1073 }
1074 
1075 static bool access_id_reg(struct kvm_vcpu *vcpu,
1076 			  struct sys_reg_params *p,
1077 			  const struct sys_reg_desc *r)
1078 {
1079 	return __access_id_reg(vcpu, p, r, false);
1080 }
1081 
1082 static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
1083 			      struct sys_reg_params *p,
1084 			      const struct sys_reg_desc *r)
1085 {
1086 	return __access_id_reg(vcpu, p, r, true);
1087 }
1088 
1089 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
1090 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
1091 static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
1092 
1093 /*
1094  * cpufeature ID register user accessors
1095  *
1096  * For now, these registers are immutable for userspace, so no values
1097  * are stored, and for set_id_reg() we don't allow the effective value
1098  * to be changed.
1099  */
1100 static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
1101 			bool raz)
1102 {
1103 	const u64 id = sys_reg_to_index(rd);
1104 	const u64 val = read_id_reg(rd, raz);
1105 
1106 	return reg_to_user(uaddr, &val, id);
1107 }
1108 
1109 static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
1110 			bool raz)
1111 {
1112 	const u64 id = sys_reg_to_index(rd);
1113 	int err;
1114 	u64 val;
1115 
1116 	err = reg_from_user(&val, uaddr, id);
1117 	if (err)
1118 		return err;
1119 
1120 	/* This is what we mean by invariant: you can't change it. */
1121 	if (val != read_id_reg(rd, raz))
1122 		return -EINVAL;
1123 
1124 	return 0;
1125 }
1126 
1127 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1128 		      const struct kvm_one_reg *reg, void __user *uaddr)
1129 {
1130 	return __get_id_reg(rd, uaddr, false);
1131 }
1132 
1133 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1134 		      const struct kvm_one_reg *reg, void __user *uaddr)
1135 {
1136 	return __set_id_reg(rd, uaddr, false);
1137 }
1138 
1139 static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1140 			  const struct kvm_one_reg *reg, void __user *uaddr)
1141 {
1142 	return __get_id_reg(rd, uaddr, true);
1143 }
1144 
1145 static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1146 			  const struct kvm_one_reg *reg, void __user *uaddr)
1147 {
1148 	return __set_id_reg(rd, uaddr, true);
1149 }
1150 
1151 /* sys_reg_desc initialiser for known cpufeature ID registers */
1152 #define ID_SANITISED(name) {			\
1153 	SYS_DESC(SYS_##name),			\
1154 	.access	= access_id_reg,		\
1155 	.get_user = get_id_reg,			\
1156 	.set_user = set_id_reg,			\
1157 }
1158 
1159 /*
1160  * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
1161  * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
1162  * (1 <= crm < 8, 0 <= Op2 < 8).
1163  */
1164 #define ID_UNALLOCATED(crm, op2) {			\
1165 	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
1166 	.access = access_raz_id_reg,			\
1167 	.get_user = get_raz_id_reg,			\
1168 	.set_user = set_raz_id_reg,			\
1169 }
1170 
1171 /*
1172  * sys_reg_desc initialiser for known ID registers that we hide from guests.
1173  * For now, these are exposed just like unallocated ID regs: they appear
1174  * RAZ for the guest.
1175  */
1176 #define ID_HIDDEN(name) {			\
1177 	SYS_DESC(SYS_##name),			\
1178 	.access = access_raz_id_reg,		\
1179 	.get_user = get_raz_id_reg,		\
1180 	.set_user = set_raz_id_reg,		\
1181 }
1182 
1183 /*
1184  * Architected system registers.
1185  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
1186  *
1187  * Debug handling: We do trap most, if not all, debug-related system
1188  * registers. The implementation is good enough to ensure that a guest
1189  * can use these with minimal performance degradation. The drawback is
1190  * that we don't implement any of the external debug or the OSLock
1191  * protocol. This should be revisited if we ever encounter a
1192  * more demanding guest...
1193  */
1194 static const struct sys_reg_desc sys_reg_descs[] = {
1195 	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
1196 	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
1197 	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
1198 
1199 	DBG_BCR_BVR_WCR_WVR_EL1(0),
1200 	DBG_BCR_BVR_WCR_WVR_EL1(1),
1201 	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
1202 	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
1203 	DBG_BCR_BVR_WCR_WVR_EL1(2),
1204 	DBG_BCR_BVR_WCR_WVR_EL1(3),
1205 	DBG_BCR_BVR_WCR_WVR_EL1(4),
1206 	DBG_BCR_BVR_WCR_WVR_EL1(5),
1207 	DBG_BCR_BVR_WCR_WVR_EL1(6),
1208 	DBG_BCR_BVR_WCR_WVR_EL1(7),
1209 	DBG_BCR_BVR_WCR_WVR_EL1(8),
1210 	DBG_BCR_BVR_WCR_WVR_EL1(9),
1211 	DBG_BCR_BVR_WCR_WVR_EL1(10),
1212 	DBG_BCR_BVR_WCR_WVR_EL1(11),
1213 	DBG_BCR_BVR_WCR_WVR_EL1(12),
1214 	DBG_BCR_BVR_WCR_WVR_EL1(13),
1215 	DBG_BCR_BVR_WCR_WVR_EL1(14),
1216 	DBG_BCR_BVR_WCR_WVR_EL1(15),
1217 
1218 	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
1219 	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
1220 	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
1221 	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
1222 	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
1223 	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
1224 	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
1225 	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
1226 
1227 	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
1228 	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
1229 	// DBGDTR[TR]X_EL0 share the same encoding
1230 	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
1231 
1232 	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
1233 
1234 	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
1235 
1236 	/*
1237 	 * ID regs: all ID_SANITISED() entries here must have corresponding
1238 	 * entries in arm64_ftr_regs[].
1239 	 */
1240 
1241 	/* AArch64 mappings of the AArch32 ID registers */
1242 	/* CRm=1 */
1243 	ID_SANITISED(ID_PFR0_EL1),
1244 	ID_SANITISED(ID_PFR1_EL1),
1245 	ID_SANITISED(ID_DFR0_EL1),
1246 	ID_HIDDEN(ID_AFR0_EL1),
1247 	ID_SANITISED(ID_MMFR0_EL1),
1248 	ID_SANITISED(ID_MMFR1_EL1),
1249 	ID_SANITISED(ID_MMFR2_EL1),
1250 	ID_SANITISED(ID_MMFR3_EL1),
1251 
1252 	/* CRm=2 */
1253 	ID_SANITISED(ID_ISAR0_EL1),
1254 	ID_SANITISED(ID_ISAR1_EL1),
1255 	ID_SANITISED(ID_ISAR2_EL1),
1256 	ID_SANITISED(ID_ISAR3_EL1),
1257 	ID_SANITISED(ID_ISAR4_EL1),
1258 	ID_SANITISED(ID_ISAR5_EL1),
1259 	ID_SANITISED(ID_MMFR4_EL1),
1260 	ID_UNALLOCATED(2,7),
1261 
1262 	/* CRm=3 */
1263 	ID_SANITISED(MVFR0_EL1),
1264 	ID_SANITISED(MVFR1_EL1),
1265 	ID_SANITISED(MVFR2_EL1),
1266 	ID_UNALLOCATED(3,3),
1267 	ID_UNALLOCATED(3,4),
1268 	ID_UNALLOCATED(3,5),
1269 	ID_UNALLOCATED(3,6),
1270 	ID_UNALLOCATED(3,7),
1271 
1272 	/* AArch64 ID registers */
1273 	/* CRm=4 */
1274 	ID_SANITISED(ID_AA64PFR0_EL1),
1275 	ID_SANITISED(ID_AA64PFR1_EL1),
1276 	ID_UNALLOCATED(4,2),
1277 	ID_UNALLOCATED(4,3),
1278 	ID_UNALLOCATED(4,4),
1279 	ID_UNALLOCATED(4,5),
1280 	ID_UNALLOCATED(4,6),
1281 	ID_UNALLOCATED(4,7),
1282 
1283 	/* CRm=5 */
1284 	ID_SANITISED(ID_AA64DFR0_EL1),
1285 	ID_SANITISED(ID_AA64DFR1_EL1),
1286 	ID_UNALLOCATED(5,2),
1287 	ID_UNALLOCATED(5,3),
1288 	ID_HIDDEN(ID_AA64AFR0_EL1),
1289 	ID_HIDDEN(ID_AA64AFR1_EL1),
1290 	ID_UNALLOCATED(5,6),
1291 	ID_UNALLOCATED(5,7),
1292 
1293 	/* CRm=6 */
1294 	ID_SANITISED(ID_AA64ISAR0_EL1),
1295 	ID_SANITISED(ID_AA64ISAR1_EL1),
1296 	ID_UNALLOCATED(6,2),
1297 	ID_UNALLOCATED(6,3),
1298 	ID_UNALLOCATED(6,4),
1299 	ID_UNALLOCATED(6,5),
1300 	ID_UNALLOCATED(6,6),
1301 	ID_UNALLOCATED(6,7),
1302 
1303 	/* CRm=7 */
1304 	ID_SANITISED(ID_AA64MMFR0_EL1),
1305 	ID_SANITISED(ID_AA64MMFR1_EL1),
1306 	ID_SANITISED(ID_AA64MMFR2_EL1),
1307 	ID_UNALLOCATED(7,3),
1308 	ID_UNALLOCATED(7,4),
1309 	ID_UNALLOCATED(7,5),
1310 	ID_UNALLOCATED(7,6),
1311 	ID_UNALLOCATED(7,7),
1312 
1313 	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
1314 	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
1315 	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
1316 	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
1317 	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
1318 
1319 	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
1320 	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
1321 	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
1322 
1323 	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
1324 	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
1325 	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
1326 	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
1327 	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
1328 	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
1329 	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
1330 	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
1331 
1332 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
1333 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
1334 
1335 	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
1336 	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
1337 
1338 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1339 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1340 
1341 	{ SYS_DESC(SYS_LORSA_EL1), trap_undef },
1342 	{ SYS_DESC(SYS_LOREA_EL1), trap_undef },
1343 	{ SYS_DESC(SYS_LORN_EL1), trap_undef },
1344 	{ SYS_DESC(SYS_LORC_EL1), trap_undef },
1345 	{ SYS_DESC(SYS_LORID_EL1), trap_undef },
1346 
1347 	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
1348 	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
1349 
1350 	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
1351 	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
1352 	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
1353 	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
1354 	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
1355 	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
1356 	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
1357 	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
1358 	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
1359 	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
1360 	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
1361 	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
1362 
1363 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
1364 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
1365 
1366 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
1367 
1368 	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
1369 
1370 	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
1371 	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
1372 	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
1373 	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
1374 	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
1375 	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
1376 	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
1377 	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
1378 	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
1379 	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
1380 	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
1381 	/*
1382 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
1383 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
1384 	 */
1385 	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
1386 	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
1387 
1388 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
1389 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
1390 
1391 	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
1392 	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
1393 	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
1394 
1395 	/* PMEVCNTRn_EL0 */
1396 	PMU_PMEVCNTR_EL0(0),
1397 	PMU_PMEVCNTR_EL0(1),
1398 	PMU_PMEVCNTR_EL0(2),
1399 	PMU_PMEVCNTR_EL0(3),
1400 	PMU_PMEVCNTR_EL0(4),
1401 	PMU_PMEVCNTR_EL0(5),
1402 	PMU_PMEVCNTR_EL0(6),
1403 	PMU_PMEVCNTR_EL0(7),
1404 	PMU_PMEVCNTR_EL0(8),
1405 	PMU_PMEVCNTR_EL0(9),
1406 	PMU_PMEVCNTR_EL0(10),
1407 	PMU_PMEVCNTR_EL0(11),
1408 	PMU_PMEVCNTR_EL0(12),
1409 	PMU_PMEVCNTR_EL0(13),
1410 	PMU_PMEVCNTR_EL0(14),
1411 	PMU_PMEVCNTR_EL0(15),
1412 	PMU_PMEVCNTR_EL0(16),
1413 	PMU_PMEVCNTR_EL0(17),
1414 	PMU_PMEVCNTR_EL0(18),
1415 	PMU_PMEVCNTR_EL0(19),
1416 	PMU_PMEVCNTR_EL0(20),
1417 	PMU_PMEVCNTR_EL0(21),
1418 	PMU_PMEVCNTR_EL0(22),
1419 	PMU_PMEVCNTR_EL0(23),
1420 	PMU_PMEVCNTR_EL0(24),
1421 	PMU_PMEVCNTR_EL0(25),
1422 	PMU_PMEVCNTR_EL0(26),
1423 	PMU_PMEVCNTR_EL0(27),
1424 	PMU_PMEVCNTR_EL0(28),
1425 	PMU_PMEVCNTR_EL0(29),
1426 	PMU_PMEVCNTR_EL0(30),
1427 	/* PMEVTYPERn_EL0 */
1428 	PMU_PMEVTYPER_EL0(0),
1429 	PMU_PMEVTYPER_EL0(1),
1430 	PMU_PMEVTYPER_EL0(2),
1431 	PMU_PMEVTYPER_EL0(3),
1432 	PMU_PMEVTYPER_EL0(4),
1433 	PMU_PMEVTYPER_EL0(5),
1434 	PMU_PMEVTYPER_EL0(6),
1435 	PMU_PMEVTYPER_EL0(7),
1436 	PMU_PMEVTYPER_EL0(8),
1437 	PMU_PMEVTYPER_EL0(9),
1438 	PMU_PMEVTYPER_EL0(10),
1439 	PMU_PMEVTYPER_EL0(11),
1440 	PMU_PMEVTYPER_EL0(12),
1441 	PMU_PMEVTYPER_EL0(13),
1442 	PMU_PMEVTYPER_EL0(14),
1443 	PMU_PMEVTYPER_EL0(15),
1444 	PMU_PMEVTYPER_EL0(16),
1445 	PMU_PMEVTYPER_EL0(17),
1446 	PMU_PMEVTYPER_EL0(18),
1447 	PMU_PMEVTYPER_EL0(19),
1448 	PMU_PMEVTYPER_EL0(20),
1449 	PMU_PMEVTYPER_EL0(21),
1450 	PMU_PMEVTYPER_EL0(22),
1451 	PMU_PMEVTYPER_EL0(23),
1452 	PMU_PMEVTYPER_EL0(24),
1453 	PMU_PMEVTYPER_EL0(25),
1454 	PMU_PMEVTYPER_EL0(26),
1455 	PMU_PMEVTYPER_EL0(27),
1456 	PMU_PMEVTYPER_EL0(28),
1457 	PMU_PMEVTYPER_EL0(29),
1458 	PMU_PMEVTYPER_EL0(30),
1459 	/*
1460 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
1461 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
1462 	 */
1463 	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
1464 
1465 	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1466 	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1467 	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
1468 };
1469 
1470 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1471 			struct sys_reg_params *p,
1472 			const struct sys_reg_desc *r)
1473 {
1474 	if (p->is_write) {
1475 		return ignore_write(vcpu, p);
1476 	} else {
1477 		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1478 		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1479 		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
1480 
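		/*
		 * Synthesize a DBGIDR view for the guest from the sanitised
		 * AArch64 ID registers: watchpoint, breakpoint and context
		 * comparator counts from ID_AA64DFR0_EL1, a v8 debug
		 * architecture version, and EL3-dependent security bits.
		 */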
1481 		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
1482 			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
1483 			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
1484 			     | (6 << 16) | (el3 << 14) | (el3 << 12));
1485 		return true;
1486 	}
1487 }
1488 
1489 static bool trap_debug32(struct kvm_vcpu *vcpu,
1490 			 struct sys_reg_params *p,
1491 			 const struct sys_reg_desc *r)
1492 {
1493 	if (p->is_write) {
1494 		vcpu_cp14(vcpu, r->reg) = p->regval;
1495 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1496 	} else {
1497 		p->regval = vcpu_cp14(vcpu, r->reg);
1498 	}
1499 
1500 	return true;
1501 }
1502 
1503 /* AArch32 debug register mappings
1504  *
1505  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
1506  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
1507  *
1508  * All control registers and watchpoint value registers are mapped to
1509  * the lower 32 bits of their AArch64 equivalents. We share the trap
1510  * handlers with the above AArch64 code which checks what mode the
1511  * system is in.
1512  */
1513 
1514 static bool trap_xvr(struct kvm_vcpu *vcpu,
1515 		     struct sys_reg_params *p,
1516 		     const struct sys_reg_desc *rd)
1517 {
1518 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
1519 
1520 	if (p->is_write) {
1521 		u64 val = *dbg_reg;
1522 
1523 		val &= 0xffffffffUL;
1524 		val |= p->regval << 32;
1525 		*dbg_reg = val;
1526 
1527 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1528 	} else {
1529 		p->regval = *dbg_reg >> 32;
1530 	}
1531 
1532 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
1533 
1534 	return true;
1535 }
1536 
1537 #define DBG_BCR_BVR_WCR_WVR(n)						\
1538 	/* DBGBVRn */							\
1539 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, 	\
1540 	/* DBGBCRn */							\
1541 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
1542 	/* DBGWVRn */							\
1543 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
1544 	/* DBGWCRn */							\
1545 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1546 
1547 #define DBGBXVR(n)							\
1548 	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
1549 
1550 /*
1551  * Trapped cp14 registers. We generally ignore most of the external
1552  * debug registers, on the principle that they don't really make sense
1553  * to a guest. Revisit this one day, should this principle change.
1554  */
1555 static const struct sys_reg_desc cp14_regs[] = {
1556 	/* DBGIDR */
1557 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
1558 	/* DBGDTRRXext */
1559 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
1560 
1561 	DBG_BCR_BVR_WCR_WVR(0),
1562 	/* DBGDSCRint */
1563 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
1564 	DBG_BCR_BVR_WCR_WVR(1),
1565 	/* DBGDCCINT */
1566 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
1567 	/* DBGDSCRext */
1568 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
1569 	DBG_BCR_BVR_WCR_WVR(2),
1570 	/* DBGDTR[RT]Xint */
1571 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
1572 	/* DBGDTR[RT]Xext */
1573 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
1574 	DBG_BCR_BVR_WCR_WVR(3),
1575 	DBG_BCR_BVR_WCR_WVR(4),
1576 	DBG_BCR_BVR_WCR_WVR(5),
1577 	/* DBGWFAR */
1578 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
1579 	/* DBGOSECCR */
1580 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
1581 	DBG_BCR_BVR_WCR_WVR(6),
1582 	/* DBGVCR */
1583 	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
1584 	DBG_BCR_BVR_WCR_WVR(7),
1585 	DBG_BCR_BVR_WCR_WVR(8),
1586 	DBG_BCR_BVR_WCR_WVR(9),
1587 	DBG_BCR_BVR_WCR_WVR(10),
1588 	DBG_BCR_BVR_WCR_WVR(11),
1589 	DBG_BCR_BVR_WCR_WVR(12),
1590 	DBG_BCR_BVR_WCR_WVR(13),
1591 	DBG_BCR_BVR_WCR_WVR(14),
1592 	DBG_BCR_BVR_WCR_WVR(15),
1593 
1594 	/* DBGDRAR (32bit) */
1595 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
1596 
1597 	DBGBXVR(0),
1598 	/* DBGOSLAR */
1599 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
1600 	DBGBXVR(1),
1601 	/* DBGOSLSR */
1602 	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
1603 	DBGBXVR(2),
1604 	DBGBXVR(3),
1605 	/* DBGOSDLR */
1606 	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
1607 	DBGBXVR(4),
1608 	/* DBGPRCR */
1609 	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
1610 	DBGBXVR(5),
1611 	DBGBXVR(6),
1612 	DBGBXVR(7),
1613 	DBGBXVR(8),
1614 	DBGBXVR(9),
1615 	DBGBXVR(10),
1616 	DBGBXVR(11),
1617 	DBGBXVR(12),
1618 	DBGBXVR(13),
1619 	DBGBXVR(14),
1620 	DBGBXVR(15),
1621 
1622 	/* DBGDSAR (32bit) */
1623 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
1624 
1625 	/* DBGDEVID2 */
1626 	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
1627 	/* DBGDEVID1 */
1628 	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
1629 	/* DBGDEVID */
1630 	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
1631 	/* DBGCLAIMSET */
1632 	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
1633 	/* DBGCLAIMCLR */
1634 	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
1635 	/* DBGAUTHSTATUS */
1636 	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
1637 };
1638 
1639 /* Trapped cp14 64bit registers */
1640 static const struct sys_reg_desc cp14_64_regs[] = {
1641 	/* DBGDRAR (64bit) */
1642 	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
1643 
1644 	/* DBGDSAR (64bit) */
1645 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
1646 };
1647 
1648 /* Macro to expand the PMEVCNTRn register */
1649 #define PMU_PMEVCNTR(n)							\
1650 	/* PMEVCNTRn */							\
1651 	{ Op1(0), CRn(0b1110),						\
1652 	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
1653 	  access_pmu_evcntr }
1654 
1655 /* Macro to expand the PMEVTYPERn register */
1656 #define PMU_PMEVTYPER(n)						\
1657 	/* PMEVTYPERn */						\
1658 	{ Op1(0), CRn(0b1110),						\
1659 	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
1660 	  access_pmu_evtyper }
1661 
1662 /*
1663  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
1664  * depending on the way they are accessed (as a 32bit or a 64bit
1665  * register).
1666  */
1667 static const struct sys_reg_desc cp15_regs[] = {
1668 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
1669 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1670 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
1671 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
1672 	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
1673 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
1674 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
1675 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
1676 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
1677 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
1678 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
1679 
1680 	/*
1681 	 * DC{C,I,CI}SW operations:
1682 	 */
1683 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
1684 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
1685 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
1686 
1687 	/* PMU */
1688 	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
1689 	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
1690 	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
1691 	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
1692 	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
1693 	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
1694 	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
1695 	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
1696 	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
1697 	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
1698 	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
1699 	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
1700 	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
1701 	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
1702 	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
1703 
1704 	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
1705 	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
1706 	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
1707 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
1708 
1709 	/* ICC_SRE */
1710 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
1711 
1712 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
1713 
1714 	/* CNTP_TVAL */
1715 	{ Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
1716 	/* CNTP_CTL */
1717 	{ Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },
1718 
1719 	/* PMEVCNTRn */
1720 	PMU_PMEVCNTR(0),
1721 	PMU_PMEVCNTR(1),
1722 	PMU_PMEVCNTR(2),
1723 	PMU_PMEVCNTR(3),
1724 	PMU_PMEVCNTR(4),
1725 	PMU_PMEVCNTR(5),
1726 	PMU_PMEVCNTR(6),
1727 	PMU_PMEVCNTR(7),
1728 	PMU_PMEVCNTR(8),
1729 	PMU_PMEVCNTR(9),
1730 	PMU_PMEVCNTR(10),
1731 	PMU_PMEVCNTR(11),
1732 	PMU_PMEVCNTR(12),
1733 	PMU_PMEVCNTR(13),
1734 	PMU_PMEVCNTR(14),
1735 	PMU_PMEVCNTR(15),
1736 	PMU_PMEVCNTR(16),
1737 	PMU_PMEVCNTR(17),
1738 	PMU_PMEVCNTR(18),
1739 	PMU_PMEVCNTR(19),
1740 	PMU_PMEVCNTR(20),
1741 	PMU_PMEVCNTR(21),
1742 	PMU_PMEVCNTR(22),
1743 	PMU_PMEVCNTR(23),
1744 	PMU_PMEVCNTR(24),
1745 	PMU_PMEVCNTR(25),
1746 	PMU_PMEVCNTR(26),
1747 	PMU_PMEVCNTR(27),
1748 	PMU_PMEVCNTR(28),
1749 	PMU_PMEVCNTR(29),
1750 	PMU_PMEVCNTR(30),
1751 	/* PMEVTYPERn */
1752 	PMU_PMEVTYPER(0),
1753 	PMU_PMEVTYPER(1),
1754 	PMU_PMEVTYPER(2),
1755 	PMU_PMEVTYPER(3),
1756 	PMU_PMEVTYPER(4),
1757 	PMU_PMEVTYPER(5),
1758 	PMU_PMEVTYPER(6),
1759 	PMU_PMEVTYPER(7),
1760 	PMU_PMEVTYPER(8),
1761 	PMU_PMEVTYPER(9),
1762 	PMU_PMEVTYPER(10),
1763 	PMU_PMEVTYPER(11),
1764 	PMU_PMEVTYPER(12),
1765 	PMU_PMEVTYPER(13),
1766 	PMU_PMEVTYPER(14),
1767 	PMU_PMEVTYPER(15),
1768 	PMU_PMEVTYPER(16),
1769 	PMU_PMEVTYPER(17),
1770 	PMU_PMEVTYPER(18),
1771 	PMU_PMEVTYPER(19),
1772 	PMU_PMEVTYPER(20),
1773 	PMU_PMEVTYPER(21),
1774 	PMU_PMEVTYPER(22),
1775 	PMU_PMEVTYPER(23),
1776 	PMU_PMEVTYPER(24),
1777 	PMU_PMEVTYPER(25),
1778 	PMU_PMEVTYPER(26),
1779 	PMU_PMEVTYPER(27),
1780 	PMU_PMEVTYPER(28),
1781 	PMU_PMEVTYPER(29),
1782 	PMU_PMEVTYPER(30),
1783 	/* PMCCFILTR */
1784 	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
1785 };
1786 
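/*
 * 64-bit (MCRR/MRRC) cp15 accesses only encode Op1 and CRm;
 * kvm_handle_cp_64() forces Op0, CRn and Op2 to zero when decoding the
 * trap, so the entries below leave those fields at zero as well so that
 * the table sorts the same way it is searched.
 */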
1787 static const struct sys_reg_desc cp15_64_regs[] = {
1788 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1789 	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
1790 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
1791 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
1792 	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
1793 	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
1794 	{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
1795 };
1796 
1797 /* Target-specific emulation tables */
1798 static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
1799 
1800 void kvm_register_target_sys_reg_table(unsigned int target,
1801 				       struct kvm_sys_reg_target_table *table)
1802 {
1803 	target_tables[target] = table;
1804 }
1805 
1806 /* Get specific register table for this target. */
1807 static const struct sys_reg_desc *get_target_table(unsigned target,
1808 						   bool mode_is_64,
1809 						   size_t *num)
1810 {
1811 	struct kvm_sys_reg_target_table *table;
1812 
1813 	table = target_tables[target];
1814 	if (mode_is_64) {
1815 		*num = table->table64.num;
1816 		return table->table64.table;
1817 	} else {
1818 		*num = table->table32.num;
1819 		return table->table32.table;
1820 	}
1821 }
1822 
1823 #define reg_to_match_value(x)						\
1824 	({								\
1825 		unsigned long val;					\
1826 		val  = (x)->Op0 << 14;					\
1827 		val |= (x)->Op1 << 11;					\
1828 		val |= (x)->CRn << 7;					\
1829 		val |= (x)->CRm << 3;					\
1830 		val |= (x)->Op2;					\
1831 		val;							\
1832 	 })
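
/*
 * reg_to_match_value() packs an encoding into a single sort key: Op0 in
 * bits [15:14], Op1 in [13:11], CRn in [10:7], CRm in [6:3] and Op2 in
 * [2:0]. find_reg() below bsearch()es on this key, so the descriptor
 * tables must be sorted in ascending key order (check_sysreg_table()
 * verifies this at init time).
 */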
1833 
1834 static int match_sys_reg(const void *key, const void *elt)
1835 {
1836 	const unsigned long pval = (unsigned long)key;
1837 	const struct sys_reg_desc *r = elt;
1838 
1839 	return pval - reg_to_match_value(r);
1840 }
1841 
1842 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
1843 					 const struct sys_reg_desc table[],
1844 					 unsigned int num)
1845 {
1846 	unsigned long pval = reg_to_match_value(params);
1847 
1848 	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
1849 }
1850 
1851 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
1852 {
1853 	kvm_inject_undefined(vcpu);
1854 	return 1;
1855 }
1856 
1857 static void perform_access(struct kvm_vcpu *vcpu,
1858 			   struct sys_reg_params *params,
1859 			   const struct sys_reg_desc *r)
1860 {
1861 	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
1862 
1863 	/*
1864 	 * Not having an accessor means that we have configured a trap
1865 	 * that we don't know how to handle. This certainly qualifies
1866 	 * as a gross bug that should be fixed right away.
1867 	 */
1868 	BUG_ON(!r->access);
1869 
1870 	/* Skip the trapped instruction if the access handler succeeded */
1871 	if (likely(r->access(vcpu, params, r)))
1872 		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1873 }
1874 
1875 /*
1876  * emulate_cp -- tries to match a sys_reg access in a handling table and
1877  *               calls the corresponding trap handler.
1878  *
1879  * @params: pointer to the descriptor of the access
1880  * @table: array of trap descriptors
1881  * @num: size of the trap descriptor array
1882  *
1883  * Return 0 if the access has been handled, and -1 if not.
1884  */
1885 static int emulate_cp(struct kvm_vcpu *vcpu,
1886 		      struct sys_reg_params *params,
1887 		      const struct sys_reg_desc *table,
1888 		      size_t num)
1889 {
1890 	const struct sys_reg_desc *r;
1891 
1892 	if (!table)
1893 		return -1;	/* Not handled */
1894 
1895 	r = find_reg(params, table, num);
1896 
1897 	if (r) {
1898 		perform_access(vcpu, params, r);
1899 		return 0;
1900 	}
1901 
1902 	/* Not handled */
1903 	return -1;
1904 }
1905 
1906 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
1907 				struct sys_reg_params *params)
1908 {
1909 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
1910 	int cp = -1;
1911 
1912 	switch(hsr_ec) {
1913 	case ESR_ELx_EC_CP15_32:
1914 	case ESR_ELx_EC_CP15_64:
1915 		cp = 15;
1916 		break;
1917 	case ESR_ELx_EC_CP14_MR:
1918 	case ESR_ELx_EC_CP14_64:
1919 		cp = 14;
1920 		break;
1921 	default:
1922 		WARN_ON(1);
1923 	}
1924 
1925 	kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n",
1926 		cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
1927 	print_sys_reg_instr(params);
1928 	kvm_inject_undefined(vcpu);
1929 }
1930 
1931 /**
1932  * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
1933  * @vcpu: The VCPU pointer
1934  * @global, @target_specific: trap descriptor tables (@nr_global and @nr_specific entries long)
1935  */
1936 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1937 			    const struct sys_reg_desc *global,
1938 			    size_t nr_global,
1939 			    const struct sys_reg_desc *target_specific,
1940 			    size_t nr_specific)
1941 {
1942 	struct sys_reg_params params;
1943 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
1944 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
1945 	int Rt2 = (hsr >> 10) & 0x1f;
1946 
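	/*
	 * Decode the MCRR/MRRC ISS: bit 0 is the direction (0 for a write,
	 * i.e. MCRR), bits [4:1] hold CRm, bits [9:5] Rt, bits [14:10] Rt2
	 * and bits [19:16] Op1. There is no CRn or Op2 in a 64-bit
	 * coprocessor access, so those fields are left at zero.
	 */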
1947 	params.is_aarch32 = true;
1948 	params.is_32bit = false;
1949 	params.CRm = (hsr >> 1) & 0xf;
1950 	params.is_write = ((hsr & 1) == 0);
1951 
1952 	params.Op0 = 0;
1953 	params.Op1 = (hsr >> 16) & 0xf;
1954 	params.Op2 = 0;
1955 	params.CRn = 0;
1956 
1957 	/*
1958 	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
1959 	 * backends between AArch32 and AArch64, we get away with it.
1960 	 */
1961 	if (params.is_write) {
1962 		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
1963 		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
1964 	}
1965 
1966 	/*
1967 	 * Try to emulate the coprocessor access using the target
1968 	 * specific table first, and using the global table afterwards.
1969 	 * If either of the tables contains a handler, the access is
1970 	 * emulated; on a read, the 64-bit result is split back into
1971 	 * Rt and Rt2 before returning with success.
1972 	 */
1973 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1974 	    !emulate_cp(vcpu, &params, global, nr_global)) {
1975 		/* Split up the value between registers for the read side */
1976 		if (!params.is_write) {
1977 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
1978 			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
1979 		}
1980 
1981 		return 1;
1982 	}
1983 
1984 	unhandled_cp_access(vcpu, &params);
1985 	return 1;
1986 }
1987 
1988 /**
1989  * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
1990  * @vcpu: The VCPU pointer
1991  * @global, @target_specific: trap descriptor tables (@nr_global and @nr_specific entries long)
1992  */
1993 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
1994 			    const struct sys_reg_desc *global,
1995 			    size_t nr_global,
1996 			    const struct sys_reg_desc *target_specific,
1997 			    size_t nr_specific)
1998 {
1999 	struct sys_reg_params params;
2000 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
2001 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
2002 
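	/*
	 * Decode the MCR/MRC ISS: bit 0 is the direction (0 for MCR, i.e. a
	 * write), bits [4:1] hold CRm, bits [9:5] Rt, bits [13:10] CRn,
	 * bits [16:14] Opc1 and bits [19:17] Opc2.
	 */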
2003 	params.is_aarch32 = true;
2004 	params.is_32bit = true;
2005 	params.CRm = (hsr >> 1) & 0xf;
2006 	params.regval = vcpu_get_reg(vcpu, Rt);
2007 	params.is_write = ((hsr & 1) == 0);
2008 	params.CRn = (hsr >> 10) & 0xf;
2009 	params.Op0 = 0;
2010 	params.Op1 = (hsr >> 14) & 0x7;
2011 	params.Op2 = (hsr >> 17) & 0x7;
2012 
2013 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
2014 	    !emulate_cp(vcpu, &params, global, nr_global)) {
2015 		if (!params.is_write)
2016 			vcpu_set_reg(vcpu, Rt, params.regval);
2017 		return 1;
2018 	}
2019 
2020 	unhandled_cp_access(vcpu, &params);
2021 	return 1;
2022 }
2023 
2024 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
2025 {
2026 	const struct sys_reg_desc *target_specific;
2027 	size_t num;
2028 
2029 	target_specific = get_target_table(vcpu->arch.target, false, &num);
2030 	return kvm_handle_cp_64(vcpu,
2031 				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
2032 				target_specific, num);
2033 }
2034 
2035 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
2036 {
2037 	const struct sys_reg_desc *target_specific;
2038 	size_t num;
2039 
2040 	target_specific = get_target_table(vcpu->arch.target, false, &num);
2041 	return kvm_handle_cp_32(vcpu,
2042 				cp15_regs, ARRAY_SIZE(cp15_regs),
2043 				target_specific, num);
2044 }
2045 
2046 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
2047 {
2048 	return kvm_handle_cp_64(vcpu,
2049 				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
2050 				NULL, 0);
2051 }
2052 
2053 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
2054 {
2055 	return kvm_handle_cp_32(vcpu,
2056 				cp14_regs, ARRAY_SIZE(cp14_regs),
2057 				NULL, 0);
2058 }
2059 
2060 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
2061 			   struct sys_reg_params *params)
2062 {
2063 	size_t num;
2064 	const struct sys_reg_desc *table, *r;
2065 
2066 	table = get_target_table(vcpu->arch.target, true, &num);
2067 
2068 	/* Search target-specific then generic table. */
2069 	r = find_reg(params, table, num);
2070 	if (!r)
2071 		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2072 
2073 	if (likely(r)) {
2074 		perform_access(vcpu, params, r);
2075 	} else {
2076 		kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n",
2077 			*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2078 		print_sys_reg_instr(params);
2079 		kvm_inject_undefined(vcpu);
2080 	}
2081 	return 1;
2082 }
2083 
2084 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
2085 			      const struct sys_reg_desc *table, size_t num)
2086 {
2087 	unsigned long i;
2088 
2089 	for (i = 0; i < num; i++)
2090 		if (table[i].reset)
2091 			table[i].reset(vcpu, &table[i]);
2092 }
2093 
2094 /**
2095  * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
2096  * @vcpu: The VCPU pointer
2097  * @run:  The kvm_run struct
2098  */
2099 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
2100 {
2101 	struct sys_reg_params params;
2102 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
2103 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
2104 	int ret;
2105 
2106 	trace_kvm_handle_sys_reg(esr);
2107 
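	/*
	 * Decode the MRS/MSR ISS: bit 0 is the direction (0 for MSR, i.e. a
	 * write), bits [4:1] hold CRm, bits [9:5] Rt, bits [13:10] CRn,
	 * bits [16:14] Op1, bits [19:17] Op2 and bits [21:20] Op0.
	 */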
2108 	params.is_aarch32 = false;
2109 	params.is_32bit = false;
2110 	params.Op0 = (esr >> 20) & 3;
2111 	params.Op1 = (esr >> 14) & 0x7;
2112 	params.CRn = (esr >> 10) & 0xf;
2113 	params.CRm = (esr >> 1) & 0xf;
2114 	params.Op2 = (esr >> 17) & 0x7;
2115 	params.regval = vcpu_get_reg(vcpu, Rt);
2116 	params.is_write = !(esr & 1);
2117 
2118 	ret = emulate_sys_reg(vcpu, &params);
2119 
2120 	if (!params.is_write)
2121 		vcpu_set_reg(vcpu, Rt, params.regval);
2122 	return ret;
2123 }
2124 
2125 /******************************************************************************
2126  * Userspace API
2127  *****************************************************************************/
2128 
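/*
 * A userspace-visible sys_reg index combines KVM_REG_ARM64, KVM_REG_SIZE_U64
 * and KVM_REG_ARM64_SYSREG with the Op0/Op1/CRn/CRm/Op2 encoding packed into
 * the low bits (see sys_reg_to_index() below). index_to_params() performs the
 * reverse transformation and rejects an index with any other bits set.
 */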
2129 static bool index_to_params(u64 id, struct sys_reg_params *params)
2130 {
2131 	switch (id & KVM_REG_SIZE_MASK) {
2132 	case KVM_REG_SIZE_U64:
2133 		/* Any unused index bits means it's not valid. */
2134 		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
2135 			      | KVM_REG_ARM_COPROC_MASK
2136 			      | KVM_REG_ARM64_SYSREG_OP0_MASK
2137 			      | KVM_REG_ARM64_SYSREG_OP1_MASK
2138 			      | KVM_REG_ARM64_SYSREG_CRN_MASK
2139 			      | KVM_REG_ARM64_SYSREG_CRM_MASK
2140 			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
2141 			return false;
2142 		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
2143 			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
2144 		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
2145 			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
2146 		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
2147 			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
2148 		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
2149 			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
2150 		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
2151 			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
2152 		return true;
2153 	default:
2154 		return false;
2155 	}
2156 }
2157 
2158 const struct sys_reg_desc *find_reg_by_id(u64 id,
2159 					  struct sys_reg_params *params,
2160 					  const struct sys_reg_desc table[],
2161 					  unsigned int num)
2162 {
2163 	if (!index_to_params(id, params))
2164 		return NULL;
2165 
2166 	return find_reg(params, table, num);
2167 }
2168 
2169 /* Decode an index value, and find the sys_reg_desc entry. */
2170 static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
2171 						    u64 id)
2172 {
2173 	size_t num;
2174 	const struct sys_reg_desc *table, *r;
2175 	struct sys_reg_params params;
2176 
2177 	/* We only do sys_reg for now. */
2178 	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
2179 		return NULL;
2180 
2181 	table = get_target_table(vcpu->arch.target, true, &num);
2182 	r = find_reg_by_id(id, &params, table, num);
2183 	if (!r)
2184 		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2185 
2186 	/* Not saved in the sys_reg array and not otherwise accessible? */
2187 	if (r && !(r->reg || r->get_user))
2188 		r = NULL;
2189 
2190 	return r;
2191 }
2192 
2193 /*
2194  * These are the invariant sys_reg registers: we let the guest see the
2195  * host versions of these, so they're part of the guest state.
2196  *
2197  * A future CPU may provide a mechanism to present different values to
2198  * the guest, or a future kvm may trap them.
2199  */
2200 
2201 #define FUNCTION_INVARIANT(reg)						\
2202 	static void get_##reg(struct kvm_vcpu *v,			\
2203 			      const struct sys_reg_desc *r)		\
2204 	{								\
2205 		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
2206 	}
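
/*
 * Each invocation below generates a get_<reg>() reset helper that snapshots
 * the host's value of the register into the descriptor's ->val field; the
 * const-ness of the descriptor is deliberately cast away, as
 * kvm_sys_reg_table_init() uses these helpers to fill in
 * invariant_sys_regs[].
 */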
2207 
2208 FUNCTION_INVARIANT(midr_el1)
2209 FUNCTION_INVARIANT(ctr_el0)
2210 FUNCTION_INVARIANT(revidr_el1)
2211 FUNCTION_INVARIANT(clidr_el1)
2212 FUNCTION_INVARIANT(aidr_el1)
2213 
2214 /* ->val is filled in by kvm_sys_reg_table_init() */
2215 static struct sys_reg_desc invariant_sys_regs[] = {
2216 	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
2217 	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
2218 	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
2219 	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
2220 	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
2221 };
2222 
2223 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
2224 {
2225 	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
2226 		return -EFAULT;
2227 	return 0;
2228 }
2229 
2230 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
2231 {
2232 	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
2233 		return -EFAULT;
2234 	return 0;
2235 }
2236 
2237 static int get_invariant_sys_reg(u64 id, void __user *uaddr)
2238 {
2239 	struct sys_reg_params params;
2240 	const struct sys_reg_desc *r;
2241 
2242 	r = find_reg_by_id(id, &params, invariant_sys_regs,
2243 			   ARRAY_SIZE(invariant_sys_regs));
2244 	if (!r)
2245 		return -ENOENT;
2246 
2247 	return reg_to_user(uaddr, &r->val, id);
2248 }
2249 
2250 static int set_invariant_sys_reg(u64 id, void __user *uaddr)
2251 {
2252 	struct sys_reg_params params;
2253 	const struct sys_reg_desc *r;
2254 	int err;
2255 	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
2256 
2257 	r = find_reg_by_id(id, &params, invariant_sys_regs,
2258 			   ARRAY_SIZE(invariant_sys_regs));
2259 	if (!r)
2260 		return -ENOENT;
2261 
2262 	err = reg_from_user(&val, uaddr, id);
2263 	if (err)
2264 		return err;
2265 
2266 	/* This is what we mean by invariant: you can't change it. */
2267 	if (r->val != val)
2268 		return -EINVAL;
2269 
2270 	return 0;
2271 }
2272 
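/*
 * val is a CSSELR value: bit 0 selects Instruction (1) versus Data/Unified
 * (0) and bits [3:1] select the cache level. cache_levels holds the CLIDR
 * Ctype fields, three bits per level, against which the selection is
 * validated.
 */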
2273 static bool is_valid_cache(u32 val)
2274 {
2275 	u32 level, ctype;
2276 
2277 	if (val >= CSSELR_MAX)
2278 		return false;
2279 
2280 	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
2281 	level = (val >> 1);
2282 	ctype = (cache_levels >> (level * 3)) & 7;
2283 
2284 	switch (ctype) {
2285 	case 0: /* No cache */
2286 		return false;
2287 	case 1: /* Instruction cache only */
2288 		return (val & 1);
2289 	case 2: /* Data cache only */
2290 	case 4: /* Unified cache */
2291 		return !(val & 1);
2292 	case 3: /* Separate instruction and data caches */
2293 		return true;
2294 	default: /* Reserved: we can't know instruction or data. */
2295 		return false;
2296 	}
2297 }
2298 
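/*
 * Demux register IDs use KVM_REG_ARM_DEMUX as the coproc field, with
 * KVM_REG_ARM_DEMUX_ID_CCSIDR selecting the CCSIDR group and the
 * KVM_REG_ARM_DEMUX_VAL field carrying the CSSELR value of the cache whose
 * CCSIDR is accessed (see write_demux_regids()).
 */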
2299 static int demux_c15_get(u64 id, void __user *uaddr)
2300 {
2301 	u32 val;
2302 	u32 __user *uval = uaddr;
2303 
2304 	/* Fail if we have unknown bits set. */
2305 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2306 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2307 		return -ENOENT;
2308 
2309 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2310 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2311 		if (KVM_REG_SIZE(id) != 4)
2312 			return -ENOENT;
2313 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2314 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2315 		if (!is_valid_cache(val))
2316 			return -ENOENT;
2317 
2318 		return put_user(get_ccsidr(val), uval);
2319 	default:
2320 		return -ENOENT;
2321 	}
2322 }
2323 
2324 static int demux_c15_set(u64 id, void __user *uaddr)
2325 {
2326 	u32 val, newval;
2327 	u32 __user *uval = uaddr;
2328 
2329 	/* Fail if we have unknown bits set. */
2330 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2331 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2332 		return -ENOENT;
2333 
2334 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2335 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2336 		if (KVM_REG_SIZE(id) != 4)
2337 			return -ENOENT;
2338 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2339 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2340 		if (!is_valid_cache(val))
2341 			return -ENOENT;
2342 
2343 		if (get_user(newval, uval))
2344 			return -EFAULT;
2345 
2346 		/* This is also invariant: you can't change it. */
2347 		if (newval != get_ccsidr(val))
2348 			return -EINVAL;
2349 		return 0;
2350 	default:
2351 		return -ENOENT;
2352 	}
2353 }
2354 
2355 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2356 {
2357 	const struct sys_reg_desc *r;
2358 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2359 
2360 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2361 		return demux_c15_get(reg->id, uaddr);
2362 
2363 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2364 		return -ENOENT;
2365 
2366 	r = index_to_sys_reg_desc(vcpu, reg->id);
2367 	if (!r)
2368 		return get_invariant_sys_reg(reg->id, uaddr);
2369 
2370 	if (r->get_user)
2371 		return (r->get_user)(vcpu, r, reg, uaddr);
2372 
2373 	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
2374 }
2375 
2376 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2377 {
2378 	const struct sys_reg_desc *r;
2379 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2380 
2381 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2382 		return demux_c15_set(reg->id, uaddr);
2383 
2384 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2385 		return -ENOENT;
2386 
2387 	r = index_to_sys_reg_desc(vcpu, reg->id);
2388 	if (!r)
2389 		return set_invariant_sys_reg(reg->id, uaddr);
2390 
2391 	if (r->set_user)
2392 		return (r->set_user)(vcpu, r, reg, uaddr);
2393 
2394 	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
2395 }
2396 
2397 static unsigned int num_demux_regs(void)
2398 {
2399 	unsigned int i, count = 0;
2400 
2401 	for (i = 0; i < CSSELR_MAX; i++)
2402 		if (is_valid_cache(i))
2403 			count++;
2404 
2405 	return count;
2406 }
2407 
2408 static int write_demux_regids(u64 __user *uindices)
2409 {
2410 	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2411 	unsigned int i;
2412 
2413 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2414 	for (i = 0; i < CSSELR_MAX; i++) {
2415 		if (!is_valid_cache(i))
2416 			continue;
2417 		if (put_user(val | i, uindices))
2418 			return -EFAULT;
2419 		uindices++;
2420 	}
2421 	return 0;
2422 }
2423 
2424 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2425 {
2426 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2427 		KVM_REG_ARM64_SYSREG |
2428 		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2429 		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2430 		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2431 		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2432 		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2433 }
2434 
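/*
 * A NULL user buffer means we are only counting registers (see
 * kvm_arm_num_sys_reg_descs(), which walks the tables with a NULL pointer);
 * in that case copy_reg_to_user() succeeds without copying anything.
 */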
2435 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2436 {
2437 	if (!*uind)
2438 		return true;
2439 
2440 	if (put_user(sys_reg_to_index(reg), *uind))
2441 		return false;
2442 
2443 	(*uind)++;
2444 	return true;
2445 }
2446 
2447 static int walk_one_sys_reg(const struct sys_reg_desc *rd,
2448 			    u64 __user **uind,
2449 			    unsigned int *total)
2450 {
2451 	/*
2452 	 * Ignore registers we trap but don't save,
2453 	 * and for which no custom user accessor is provided.
2454 	 */
2455 	if (!(rd->reg || rd->get_user))
2456 		return 0;
2457 
2458 	if (!copy_reg_to_user(rd, uind))
2459 		return -EFAULT;
2460 
2461 	(*total)++;
2462 	return 0;
2463 }
2464 
2465 /* The tables are assumed to be sorted; see kvm_sys_reg_table_init(). */
2466 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
2467 {
2468 	const struct sys_reg_desc *i1, *i2, *end1, *end2;
2469 	unsigned int total = 0;
2470 	size_t num;
2471 	int err;
2472 
2473 	/* We check for duplicates here, to allow arch-specific overrides. */
2474 	i1 = get_target_table(vcpu->arch.target, true, &num);
2475 	end1 = i1 + num;
2476 	i2 = sys_reg_descs;
2477 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
2478 
2479 	BUG_ON(i1 == end1 || i2 == end2);
2480 
2481 	/* Walk carefully, as both tables may refer to the same register. */
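	/*
	 * Both tables are sorted, so do a merge walk: on a tie (cmp == 0) the
	 * target-specific entry is emitted and both iterators advance, which
	 * is how an override hides the corresponding generic entry.
	 */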
2482 	while (i1 || i2) {
2483 		int cmp = cmp_sys_reg(i1, i2);
2484 		/* target-specific overrides generic entry. */
2485 		if (cmp <= 0)
2486 			err = walk_one_sys_reg(i1, &uind, &total);
2487 		else
2488 			err = walk_one_sys_reg(i2, &uind, &total);
2489 
2490 		if (err)
2491 			return err;
2492 
2493 		if (cmp <= 0 && ++i1 == end1)
2494 			i1 = NULL;
2495 		if (cmp >= 0 && ++i2 == end2)
2496 			i2 = NULL;
2497 	}
2498 	return total;
2499 }
2500 
2501 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2502 {
2503 	return ARRAY_SIZE(invariant_sys_regs)
2504 		+ num_demux_regs()
2505 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
2506 }
2507 
2508 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2509 {
2510 	unsigned int i;
2511 	int err;
2512 
2513 	/* First give them all the invariant registers' indices. */
2514 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2515 		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2516 			return -EFAULT;
2517 		uindices++;
2518 	}
2519 
2520 	err = walk_sys_regs(vcpu, uindices);
2521 	if (err < 0)
2522 		return err;
2523 	uindices += err;
2524 
2525 	return write_demux_regids(uindices);
2526 }
2527 
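/*
 * Verify that a descriptor table is strictly sorted by encoding, as
 * find_reg() relies on that ordering for its bsearch().
 */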
2528 static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
2529 {
2530 	unsigned int i;
2531 
2532 	for (i = 1; i < n; i++) {
2533 		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2534 			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2535 			return 1;
2536 		}
2537 	}
2538 
2539 	return 0;
2540 }
2541 
2542 void kvm_sys_reg_table_init(void)
2543 {
2544 	unsigned int i;
2545 	struct sys_reg_desc clidr;
2546 
2547 	/* Make sure tables are unique and in order. */
2548 	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
2549 	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
2550 	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
2551 	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
2552 	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
2553 	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
2554 
2555 	/* We abuse the reset function to overwrite the table itself. */
2556 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
2557 		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
2558 
2559 	/*
2560 	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
2561 	 *
2562 	 *   If software reads the Cache Type fields from Ctype1
2563 	 *   upwards, once it has seen a value of 0b000, no caches
2564 	 *   exist at further-out levels of the hierarchy. So, for
2565 	 *   example, if Ctype3 is the first Cache Type field with a
2566 	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
2567 	 *   ignored.
2568 	 */
2569 	get_clidr_el1(NULL, &clidr); /* Ugly... */
2570 	cache_levels = clidr.val;
2571 	for (i = 0; i < 7; i++)
2572 		if (((cache_levels >> (i*3)) & 7) == 0)
2573 			break;
2574 	/* Clear all higher bits. */
2575 	cache_levels &= (1 << (i*3))-1;
2576 }
2577 
2578 /**
2579  * kvm_reset_sys_regs - sets system registers to reset value
2580  * @vcpu: The VCPU pointer
2581  *
2582  * This function finds the right table above and sets the registers on the
2583  * virtual CPU struct to their architecturally defined reset values.
2584  */
2585 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2586 {
2587 	size_t num;
2588 	const struct sys_reg_desc *table;
2589 
2590 	/* Catch someone adding a register without putting in a reset entry. */
2591 	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
2592 
2593 	/* Generic chip reset first (so the target-specific table can override). */
2594 	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2595 
2596 	table = get_target_table(vcpu->arch.target, true, &num);
2597 	reset_sys_reg_descs(vcpu, table, num);
2598 
2599 	for (num = 1; num < NR_SYS_REGS; num++)
2600 		if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
2601 			panic("Didn't reset __vcpu_sys_reg(%zi)", num);
2602 }
2603