xref: /openbmc/linux/arch/arm64/kvm/sys_regs.c (revision 239480ab)
1 /*
2  * Copyright (C) 2012,2013 - ARM Ltd
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * Derived from arch/arm/kvm/coproc.c:
6  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7  * Authors: Rusty Russell <rusty@rustcorp.com.au>
8  *          Christoffer Dall <c.dall@virtualopensystems.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License, version 2, as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
21  */
22 
23 #include <linux/bsearch.h>
24 #include <linux/kvm_host.h>
25 #include <linux/mm.h>
26 #include <linux/uaccess.h>
27 
28 #include <asm/cacheflush.h>
29 #include <asm/cputype.h>
30 #include <asm/debug-monitors.h>
31 #include <asm/esr.h>
32 #include <asm/kvm_arm.h>
33 #include <asm/kvm_asm.h>
34 #include <asm/kvm_coproc.h>
35 #include <asm/kvm_emulate.h>
36 #include <asm/kvm_host.h>
37 #include <asm/kvm_mmu.h>
38 #include <asm/perf_event.h>
39 #include <asm/sysreg.h>
40 
41 #include <trace/events/kvm.h>
42 
43 #include "sys_regs.h"
44 
45 #include "trace.h"
46 
47 /*
48  * All of this file is extremely similar to the ARM coproc.c, but the
49  * types are different. My gut feeling is that it should be pretty
50  * easy to merge, but that would be an ABI breakage -- again. VFP
51  * would also need to be abstracted.
52  *
53  * For AArch32, we only take care of what is being trapped. Anything
54  * that has to do with init and userspace access has to go via the
55  * 64bit interface.
56  */
57 
58 static bool read_from_write_only(struct kvm_vcpu *vcpu,
59 				 const struct sys_reg_params *params)
60 {
61 	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
62 	print_sys_reg_instr(params);
63 	kvm_inject_undefined(vcpu);
64 	return false;
65 }
66 
67 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
68 static u32 cache_levels;
69 
70 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
71 #define CSSELR_MAX 12
72 
73 /* Which cache CCSIDR represents depends on CSSELR value. */
74 static u32 get_ccsidr(u32 csselr)
75 {
76 	u32 ccsidr;
77 
78 	/* Make sure no one else changes CSSELR during this! */
79 	local_irq_disable();
80 	write_sysreg(csselr, csselr_el1);
81 	isb();
82 	ccsidr = read_sysreg(ccsidr_el1);
83 	local_irq_enable();
84 
85 	return ccsidr;
86 }
87 
88 /*
89  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
90  */
91 static bool access_dcsw(struct kvm_vcpu *vcpu,
92 			struct sys_reg_params *p,
93 			const struct sys_reg_desc *r)
94 {
95 	if (!p->is_write)
96 		return read_from_write_only(vcpu, p);
97 
98 	kvm_set_way_flush(vcpu);
99 	return true;
100 }
101 
102 /*
103  * Generic accessor for VM registers. Only called as long as HCR_TVM
104  * is set. If the guest enables the MMU, we stop trapping the VM
105  * sys_regs and leave it in complete control of the caches.
106  */
107 static bool access_vm_reg(struct kvm_vcpu *vcpu,
108 			  struct sys_reg_params *p,
109 			  const struct sys_reg_desc *r)
110 {
111 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
112 
113 	BUG_ON(!p->is_write);
114 
115 	if (!p->is_aarch32) {
116 		vcpu_sys_reg(vcpu, r->reg) = p->regval;
117 	} else {
118 		if (!p->is_32bit)
119 			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
120 		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
121 	}
122 
123 	kvm_toggle_cache(vcpu, was_enabled);
124 	return true;
125 }
126 
127 /*
128  * Trap handler for the GICv3 SGI generation system register.
129  * Forward the request to the VGIC emulation.
130  * The cp15_64 code makes sure this automatically works
131  * for both AArch64 and AArch32 accesses.
132  */
133 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
134 			   struct sys_reg_params *p,
135 			   const struct sys_reg_desc *r)
136 {
137 	if (!p->is_write)
138 		return read_from_write_only(vcpu, p);
139 
140 	vgic_v3_dispatch_sgi(vcpu, p->regval);
141 
142 	return true;
143 }
144 
145 static bool access_gic_sre(struct kvm_vcpu *vcpu,
146 			   struct sys_reg_params *p,
147 			   const struct sys_reg_desc *r)
148 {
149 	if (p->is_write)
150 		return ignore_write(vcpu, p);
151 
152 	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
153 	return true;
154 }
155 
156 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
157 			struct sys_reg_params *p,
158 			const struct sys_reg_desc *r)
159 {
160 	if (p->is_write)
161 		return ignore_write(vcpu, p);
162 	else
163 		return read_zero(vcpu, p);
164 }
165 
166 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
167 			   struct sys_reg_params *p,
168 			   const struct sys_reg_desc *r)
169 {
170 	if (p->is_write) {
171 		return ignore_write(vcpu, p);
172 	} else {
173 		p->regval = (1 << 3);
174 		return true;
175 	}
176 }
177 
178 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
179 				   struct sys_reg_params *p,
180 				   const struct sys_reg_desc *r)
181 {
182 	if (p->is_write) {
183 		return ignore_write(vcpu, p);
184 	} else {
185 		p->regval = read_sysreg(dbgauthstatus_el1);
186 		return true;
187 	}
188 }
189 
190 /*
191  * We want to avoid world-switching all the DBG registers all the
192  * time:
193  *
194  * - If we've touched any debug register, it is likely that we're
195  *   going to touch more of them. It then makes sense to disable the
196  *   traps and start doing the save/restore dance
197  * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
198  *   then mandatory to save/restore the registers, as the guest
199  *   depends on them.
200  *
201  * For this, we use a DIRTY bit, indicating the guest has modified the
202  * debug registers, used as follows:
203  *
204  * On guest entry:
205  * - If the dirty bit is set (because we're coming back from trapping),
206  *   disable the traps, save host registers, restore guest registers.
207  * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
208  *   set the dirty bit, disable the traps, save host registers,
209  *   restore guest registers.
210  * - Otherwise, enable the traps
211  *
212  * On guest exit:
213  * - If the dirty bit is set, save guest registers, restore host
214  *   registers and clear the dirty bit. This ensures that the host can
215  *   now use the debug registers.
216  */
217 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
218 			    struct sys_reg_params *p,
219 			    const struct sys_reg_desc *r)
220 {
221 	if (p->is_write) {
222 		vcpu_sys_reg(vcpu, r->reg) = p->regval;
223 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
224 	} else {
225 		p->regval = vcpu_sys_reg(vcpu, r->reg);
226 	}
227 
228 	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
229 
230 	return true;
231 }
232 
233 /*
234  * reg_to_dbg/dbg_to_reg
235  *
236  * A 32 bit write to a debug register leaves the top bits alone
237  * A 32 bit read from a debug register only returns the bottom bits
238  *
239  * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
240  * hyp.S code switches between host and guest values in future.
241  */
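/*
 * Hypothetical worked example of the rule above (values are arbitrary and
 * only for illustration): if a 64-bit debug register currently holds
 * 0xffff000012345678, a 32-bit (AArch32) guest write of 0xdeadbeef leaves
 * the upper half alone and stores 0xffff0000deadbeef, while a 32-bit read
 * returns just 0x12345678.
 */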
242 static void reg_to_dbg(struct kvm_vcpu *vcpu,
243 		       struct sys_reg_params *p,
244 		       u64 *dbg_reg)
245 {
246 	u64 val = p->regval;
247 
248 	if (p->is_32bit) {
249 		val &= 0xffffffffUL;
250 		val |= ((*dbg_reg >> 32) << 32);
251 	}
252 
253 	*dbg_reg = val;
254 	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
255 }
256 
257 static void dbg_to_reg(struct kvm_vcpu *vcpu,
258 		       struct sys_reg_params *p,
259 		       u64 *dbg_reg)
260 {
261 	p->regval = *dbg_reg;
262 	if (p->is_32bit)
263 		p->regval &= 0xffffffffUL;
264 }
265 
266 static bool trap_bvr(struct kvm_vcpu *vcpu,
267 		     struct sys_reg_params *p,
268 		     const struct sys_reg_desc *rd)
269 {
270 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
271 
272 	if (p->is_write)
273 		reg_to_dbg(vcpu, p, dbg_reg);
274 	else
275 		dbg_to_reg(vcpu, p, dbg_reg);
276 
277 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
278 
279 	return true;
280 }
281 
282 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
283 		const struct kvm_one_reg *reg, void __user *uaddr)
284 {
285 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
286 
287 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
288 		return -EFAULT;
289 	return 0;
290 }
291 
292 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
293 	const struct kvm_one_reg *reg, void __user *uaddr)
294 {
295 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
296 
297 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
298 		return -EFAULT;
299 	return 0;
300 }
301 
302 static void reset_bvr(struct kvm_vcpu *vcpu,
303 		      const struct sys_reg_desc *rd)
304 {
305 	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
306 }
307 
308 static bool trap_bcr(struct kvm_vcpu *vcpu,
309 		     struct sys_reg_params *p,
310 		     const struct sys_reg_desc *rd)
311 {
312 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
313 
314 	if (p->is_write)
315 		reg_to_dbg(vcpu, p, dbg_reg);
316 	else
317 		dbg_to_reg(vcpu, p, dbg_reg);
318 
319 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
320 
321 	return true;
322 }
323 
324 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
325 		const struct kvm_one_reg *reg, void __user *uaddr)
326 {
327 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
328 
329 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
330 		return -EFAULT;
331 
332 	return 0;
333 }
334 
335 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
336 	const struct kvm_one_reg *reg, void __user *uaddr)
337 {
338 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
339 
340 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
341 		return -EFAULT;
342 	return 0;
343 }
344 
345 static void reset_bcr(struct kvm_vcpu *vcpu,
346 		      const struct sys_reg_desc *rd)
347 {
348 	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
349 }
350 
351 static bool trap_wvr(struct kvm_vcpu *vcpu,
352 		     struct sys_reg_params *p,
353 		     const struct sys_reg_desc *rd)
354 {
355 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
356 
357 	if (p->is_write)
358 		reg_to_dbg(vcpu, p, dbg_reg);
359 	else
360 		dbg_to_reg(vcpu, p, dbg_reg);
361 
362 	trace_trap_reg(__func__, rd->reg, p->is_write,
363 		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
364 
365 	return true;
366 }
367 
368 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
369 		const struct kvm_one_reg *reg, void __user *uaddr)
370 {
371 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
372 
373 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
374 		return -EFAULT;
375 	return 0;
376 }
377 
378 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
379 	const struct kvm_one_reg *reg, void __user *uaddr)
380 {
381 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
382 
383 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
384 		return -EFAULT;
385 	return 0;
386 }
387 
388 static void reset_wvr(struct kvm_vcpu *vcpu,
389 		      const struct sys_reg_desc *rd)
390 {
391 	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
392 }
393 
394 static bool trap_wcr(struct kvm_vcpu *vcpu,
395 		     struct sys_reg_params *p,
396 		     const struct sys_reg_desc *rd)
397 {
398 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
399 
400 	if (p->is_write)
401 		reg_to_dbg(vcpu, p, dbg_reg);
402 	else
403 		dbg_to_reg(vcpu, p, dbg_reg);
404 
405 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
406 
407 	return true;
408 }
409 
410 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
411 		const struct kvm_one_reg *reg, void __user *uaddr)
412 {
413 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
414 
415 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
416 		return -EFAULT;
417 	return 0;
418 }
419 
420 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
421 	const struct kvm_one_reg *reg, void __user *uaddr)
422 {
423 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
424 
425 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
426 		return -EFAULT;
427 	return 0;
428 }
429 
430 static void reset_wcr(struct kvm_vcpu *vcpu,
431 		      const struct sys_reg_desc *rd)
432 {
433 	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
434 }
435 
436 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
437 {
438 	vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
439 }
440 
441 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
442 {
443 	u64 mpidr;
444 
445 	/*
446 	 * Map the vcpu_id into the first three affinity level fields of
447 	 * the MPIDR. We limit level 0 to 16 VCPUs, as the ICC_SGIxR
448 	 * registers of the GICv3 can only address 16 CPUs at that level
449 	 * and we want to be able to address each CPU directly when
450 	 * sending IPIs.
451 	 */
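	/*
	 * Illustration (hypothetical vcpu_id): vcpu_id 21 (0x15) ends up
	 * with Aff0 = 5, Aff1 = 1 and Aff2 = 0, i.e. CPU 5 of the second
	 * group of 16.
	 */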
452 	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
453 	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
454 	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
455 	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
456 }
457 
458 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
459 {
460 	u64 pmcr, val;
461 
462 	pmcr = read_sysreg(pmcr_el0);
463 	/*
464 	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN,
465 	 * except PMCR.E, which resets to zero.
466 	 */
467 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
468 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
469 	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
470 }
471 
472 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
473 {
474 	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
475 	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
476 
477 	if (!enabled)
478 		kvm_inject_undefined(vcpu);
479 
480 	return !enabled;
481 }
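/*
 * Note the inverted sense of the helpers below: they return true when the
 * access is *disallowed* (and an UNDEF has already been injected), so
 * callers simply "return false" to abort the emulation without skipping
 * the trapped instruction.
 */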
482 
483 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
484 {
485 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
486 }
487 
488 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
489 {
490 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
491 }
492 
493 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
494 {
495 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
496 }
497 
498 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
499 {
500 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
501 }
502 
503 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
504 			const struct sys_reg_desc *r)
505 {
506 	u64 val;
507 
508 	if (!kvm_arm_pmu_v3_ready(vcpu))
509 		return trap_raz_wi(vcpu, p, r);
510 
511 	if (pmu_access_el0_disabled(vcpu))
512 		return false;
513 
514 	if (p->is_write) {
515 		/* Only update writeable bits of PMCR */
516 		val = vcpu_sys_reg(vcpu, PMCR_EL0);
517 		val &= ~ARMV8_PMU_PMCR_MASK;
518 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
519 		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
520 		kvm_pmu_handle_pmcr(vcpu, val);
521 	} else {
522 		/* PMCR.P & PMCR.C are RAZ */
523 		val = vcpu_sys_reg(vcpu, PMCR_EL0)
524 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
525 		p->regval = val;
526 	}
527 
528 	return true;
529 }
530 
531 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
532 			  const struct sys_reg_desc *r)
533 {
534 	if (!kvm_arm_pmu_v3_ready(vcpu))
535 		return trap_raz_wi(vcpu, p, r);
536 
537 	if (pmu_access_event_counter_el0_disabled(vcpu))
538 		return false;
539 
540 	if (p->is_write)
541 		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
542 	else
543 		/* return PMSELR.SEL field */
544 		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
545 			    & ARMV8_PMU_COUNTER_MASK;
546 
547 	return true;
548 }
549 
550 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
551 			  const struct sys_reg_desc *r)
552 {
553 	u64 pmceid;
554 
555 	if (!kvm_arm_pmu_v3_ready(vcpu))
556 		return trap_raz_wi(vcpu, p, r);
557 
558 	BUG_ON(p->is_write);
559 
560 	if (pmu_access_el0_disabled(vcpu))
561 		return false;
562 
563 	if (!(p->Op2 & 1))
564 		pmceid = read_sysreg(pmceid0_el0);
565 	else
566 		pmceid = read_sysreg(pmceid1_el0);
567 
568 	p->regval = pmceid;
569 
570 	return true;
571 }
572 
573 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
574 {
575 	u64 pmcr, val;
576 
577 	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
578 	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
579 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
580 		kvm_inject_undefined(vcpu);
581 		return false;
582 	}
583 
584 	return true;
585 }
586 
587 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
588 			      struct sys_reg_params *p,
589 			      const struct sys_reg_desc *r)
590 {
591 	u64 idx;
592 
593 	if (!kvm_arm_pmu_v3_ready(vcpu))
594 		return trap_raz_wi(vcpu, p, r);
595 
596 	if (r->CRn == 9 && r->CRm == 13) {
597 		if (r->Op2 == 2) {
598 			/* PMXEVCNTR_EL0 */
599 			if (pmu_access_event_counter_el0_disabled(vcpu))
600 				return false;
601 
602 			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
603 			      & ARMV8_PMU_COUNTER_MASK;
604 		} else if (r->Op2 == 0) {
605 			/* PMCCNTR_EL0 */
606 			if (pmu_access_cycle_counter_el0_disabled(vcpu))
607 				return false;
608 
609 			idx = ARMV8_PMU_CYCLE_IDX;
610 		} else {
611 			return false;
612 		}
613 	} else if (r->CRn == 0 && r->CRm == 9) {
614 		/* PMCCNTR */
615 		if (pmu_access_event_counter_el0_disabled(vcpu))
616 			return false;
617 
618 		idx = ARMV8_PMU_CYCLE_IDX;
619 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
620 		/* PMEVCNTRn_EL0 */
621 		if (pmu_access_event_counter_el0_disabled(vcpu))
622 			return false;
623 
624 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
625 	} else {
626 		return false;
627 	}
628 
629 	if (!pmu_counter_idx_valid(vcpu, idx))
630 		return false;
631 
632 	if (p->is_write) {
633 		if (pmu_access_el0_disabled(vcpu))
634 			return false;
635 
636 		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
637 	} else {
638 		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
639 	}
640 
641 	return true;
642 }
643 
644 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
645 			       const struct sys_reg_desc *r)
646 {
647 	u64 idx, reg;
648 
649 	if (!kvm_arm_pmu_v3_ready(vcpu))
650 		return trap_raz_wi(vcpu, p, r);
651 
652 	if (pmu_access_el0_disabled(vcpu))
653 		return false;
654 
655 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
656 		/* PMXEVTYPER_EL0 */
657 		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
658 		reg = PMEVTYPER0_EL0 + idx;
659 	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
660 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
661 		if (idx == ARMV8_PMU_CYCLE_IDX)
662 			reg = PMCCFILTR_EL0;
663 		else
664 			/* PMEVTYPERn_EL0 */
665 			reg = PMEVTYPER0_EL0 + idx;
666 	} else {
667 		BUG();
668 	}
669 
670 	if (!pmu_counter_idx_valid(vcpu, idx))
671 		return false;
672 
673 	if (p->is_write) {
674 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
675 		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
676 	} else {
677 		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
678 	}
679 
680 	return true;
681 }
682 
683 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
684 			   const struct sys_reg_desc *r)
685 {
686 	u64 val, mask;
687 
688 	if (!kvm_arm_pmu_v3_ready(vcpu))
689 		return trap_raz_wi(vcpu, p, r);
690 
691 	if (pmu_access_el0_disabled(vcpu))
692 		return false;
693 
694 	mask = kvm_pmu_valid_counter_mask(vcpu);
695 	if (p->is_write) {
696 		val = p->regval & mask;
697 		if (r->Op2 & 0x1) {
698 			/* accessing PMCNTENSET_EL0 */
699 			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
700 			kvm_pmu_enable_counter(vcpu, val);
701 		} else {
702 			/* accessing PMCNTENCLR_EL0 */
703 			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
704 			kvm_pmu_disable_counter(vcpu, val);
705 		}
706 	} else {
707 		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
708 	}
709 
710 	return true;
711 }
712 
713 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
714 			   const struct sys_reg_desc *r)
715 {
716 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
717 
718 	if (!kvm_arm_pmu_v3_ready(vcpu))
719 		return trap_raz_wi(vcpu, p, r);
720 
721 	if (!vcpu_mode_priv(vcpu)) {
722 		kvm_inject_undefined(vcpu);
723 		return false;
724 	}
725 
726 	if (p->is_write) {
727 		u64 val = p->regval & mask;
728 
729 		if (r->Op2 & 0x1)
730 			/* accessing PMINTENSET_EL1 */
731 			vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
732 		else
733 			/* accessing PMINTENCLR_EL1 */
734 			vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
735 	} else {
736 		p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
737 	}
738 
739 	return true;
740 }
741 
742 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
743 			 const struct sys_reg_desc *r)
744 {
745 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
746 
747 	if (!kvm_arm_pmu_v3_ready(vcpu))
748 		return trap_raz_wi(vcpu, p, r);
749 
750 	if (pmu_access_el0_disabled(vcpu))
751 		return false;
752 
753 	if (p->is_write) {
754 		if (r->CRm & 0x2)
755 			/* accessing PMOVSSET_EL0 */
756 			kvm_pmu_overflow_set(vcpu, p->regval & mask);
757 		else
758 			/* accessing PMOVSCLR_EL0 */
759 			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
760 	} else {
761 		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
762 	}
763 
764 	return true;
765 }
766 
767 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
768 			   const struct sys_reg_desc *r)
769 {
770 	u64 mask;
771 
772 	if (!kvm_arm_pmu_v3_ready(vcpu))
773 		return trap_raz_wi(vcpu, p, r);
774 
775 	if (!p->is_write)
776 		return read_from_write_only(vcpu, p);
777 
778 	if (pmu_write_swinc_el0_disabled(vcpu))
779 		return false;
780 
781 	mask = kvm_pmu_valid_counter_mask(vcpu);
782 	kvm_pmu_software_increment(vcpu, p->regval & mask);
783 	return true;
784 }
785 
786 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
787 			     const struct sys_reg_desc *r)
788 {
789 	if (!kvm_arm_pmu_v3_ready(vcpu))
790 		return trap_raz_wi(vcpu, p, r);
791 
792 	if (p->is_write) {
793 		if (!vcpu_mode_priv(vcpu)) {
794 			kvm_inject_undefined(vcpu);
795 			return false;
796 		}
797 
798 		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
799 						    & ARMV8_PMU_USERENR_MASK;
800 	} else {
801 		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
802 			    & ARMV8_PMU_USERENR_MASK;
803 	}
804 
805 	return true;
806 }
807 
808 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
809 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
810 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
811 	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
812 	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
813 	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
814 	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
815 	  trap_wvr, reset_wvr, n, 0,  get_wvr, set_wvr },		\
816 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
817 	  trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
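/*
 * For example, DBG_BCR_BVR_WCR_WVR_EL1(2) expands to four sys_reg_desc
 * entries (DBGBVR2_EL1, DBGBCR2_EL1, DBGWVR2_EL1 and DBGWCR2_EL1), each
 * wired to the matching trap_*, reset_* and get_*/set_* helpers with
 * n == 2 as the debug register index.
 */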
818 
819 /* Macro to expand the PMEVCNTRn_EL0 register */
820 #define PMU_PMEVCNTR_EL0(n)						\
821 	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),					\
822 	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
823 
824 /* Macro to expand the PMEVTYPERn_EL0 register */
825 #define PMU_PMEVTYPER_EL0(n)						\
826 	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\
827 	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
828 
829 static bool access_cntp_tval(struct kvm_vcpu *vcpu,
830 		struct sys_reg_params *p,
831 		const struct sys_reg_desc *r)
832 {
833 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
834 	u64 now = kvm_phys_timer_read();
835 
836 	if (p->is_write)
837 		ptimer->cnt_cval = p->regval + now;
838 	else
839 		p->regval = ptimer->cnt_cval - now;
840 
841 	return true;
842 }
843 
844 static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
845 		struct sys_reg_params *p,
846 		const struct sys_reg_desc *r)
847 {
848 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
849 
850 	if (p->is_write) {
851 		/* ISTATUS bit is read-only */
852 		ptimer->cnt_ctl = p->regval & ~ARCH_TIMER_CTRL_IT_STAT;
853 	} else {
854 		u64 now = kvm_phys_timer_read();
855 
856 		p->regval = ptimer->cnt_ctl;
857 		/*
858 		 * Set the ISTATUS bit if the timer has expired.
859 		 * Note that according to ARMv8 ARM Issue A.k, the ISTATUS bit is
860 		 * UNKNOWN when the ENABLE bit is 0, so we chose to set the ISTATUS
861 		 * bit regardless of the ENABLE bit for implementation convenience.
862 		 */
863 		if (ptimer->cnt_cval <= now)
864 			p->regval |= ARCH_TIMER_CTRL_IT_STAT;
865 	}
866 
867 	return true;
868 }
869 
870 static bool access_cntp_cval(struct kvm_vcpu *vcpu,
871 		struct sys_reg_params *p,
872 		const struct sys_reg_desc *r)
873 {
874 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
875 
876 	if (p->is_write)
877 		ptimer->cnt_cval = p->regval;
878 	else
879 		p->regval = ptimer->cnt_cval;
880 
881 	return true;
882 }
883 
884 /*
885  * Architected system registers.
886  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
887  *
888  * Debug handling: We do trap most, if not all, debug-related system
889  * registers. The implementation is good enough to ensure that a guest
890  * can use these with minimal performance degradation. The drawback is
891  * that we don't implement any of the external debug nor the
892  * OSlock protocol. This should be revisited if we ever encounter a
893  * more demanding guest...
894  */
895 static const struct sys_reg_desc sys_reg_descs[] = {
896 	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
897 	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
898 	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
899 
900 	DBG_BCR_BVR_WCR_WVR_EL1(0),
901 	DBG_BCR_BVR_WCR_WVR_EL1(1),
902 	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
903 	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
904 	DBG_BCR_BVR_WCR_WVR_EL1(2),
905 	DBG_BCR_BVR_WCR_WVR_EL1(3),
906 	DBG_BCR_BVR_WCR_WVR_EL1(4),
907 	DBG_BCR_BVR_WCR_WVR_EL1(5),
908 	DBG_BCR_BVR_WCR_WVR_EL1(6),
909 	DBG_BCR_BVR_WCR_WVR_EL1(7),
910 	DBG_BCR_BVR_WCR_WVR_EL1(8),
911 	DBG_BCR_BVR_WCR_WVR_EL1(9),
912 	DBG_BCR_BVR_WCR_WVR_EL1(10),
913 	DBG_BCR_BVR_WCR_WVR_EL1(11),
914 	DBG_BCR_BVR_WCR_WVR_EL1(12),
915 	DBG_BCR_BVR_WCR_WVR_EL1(13),
916 	DBG_BCR_BVR_WCR_WVR_EL1(14),
917 	DBG_BCR_BVR_WCR_WVR_EL1(15),
918 
919 	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
920 	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
921 	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
922 	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
923 	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
924 	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
925 	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
926 	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
927 
928 	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
929 	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
930 	// DBGDTR[TR]X_EL0 share the same encoding
931 	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
932 
933 	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
934 
935 	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
936 	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
937 	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
938 	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
939 	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
940 	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
941 
942 	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
943 	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
944 	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
945 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
946 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
947 
948 	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
949 	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
950 
951 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
952 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
953 
954 	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
955 
956 	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
957 	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
958 
959 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
960 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
961 
962 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
963 
964 	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
965 
966 	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
967 	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
968 	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
969 	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
970 	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
971 	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
972 	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
973 	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
974 	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
975 	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
976 	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
977 	/*
978 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
979 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
980 	 */
981 	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
982 	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
983 
984 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
985 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
986 
987 	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
988 	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
989 	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
990 
991 	/* PMEVCNTRn_EL0 */
992 	PMU_PMEVCNTR_EL0(0),
993 	PMU_PMEVCNTR_EL0(1),
994 	PMU_PMEVCNTR_EL0(2),
995 	PMU_PMEVCNTR_EL0(3),
996 	PMU_PMEVCNTR_EL0(4),
997 	PMU_PMEVCNTR_EL0(5),
998 	PMU_PMEVCNTR_EL0(6),
999 	PMU_PMEVCNTR_EL0(7),
1000 	PMU_PMEVCNTR_EL0(8),
1001 	PMU_PMEVCNTR_EL0(9),
1002 	PMU_PMEVCNTR_EL0(10),
1003 	PMU_PMEVCNTR_EL0(11),
1004 	PMU_PMEVCNTR_EL0(12),
1005 	PMU_PMEVCNTR_EL0(13),
1006 	PMU_PMEVCNTR_EL0(14),
1007 	PMU_PMEVCNTR_EL0(15),
1008 	PMU_PMEVCNTR_EL0(16),
1009 	PMU_PMEVCNTR_EL0(17),
1010 	PMU_PMEVCNTR_EL0(18),
1011 	PMU_PMEVCNTR_EL0(19),
1012 	PMU_PMEVCNTR_EL0(20),
1013 	PMU_PMEVCNTR_EL0(21),
1014 	PMU_PMEVCNTR_EL0(22),
1015 	PMU_PMEVCNTR_EL0(23),
1016 	PMU_PMEVCNTR_EL0(24),
1017 	PMU_PMEVCNTR_EL0(25),
1018 	PMU_PMEVCNTR_EL0(26),
1019 	PMU_PMEVCNTR_EL0(27),
1020 	PMU_PMEVCNTR_EL0(28),
1021 	PMU_PMEVCNTR_EL0(29),
1022 	PMU_PMEVCNTR_EL0(30),
1023 	/* PMEVTYPERn_EL0 */
1024 	PMU_PMEVTYPER_EL0(0),
1025 	PMU_PMEVTYPER_EL0(1),
1026 	PMU_PMEVTYPER_EL0(2),
1027 	PMU_PMEVTYPER_EL0(3),
1028 	PMU_PMEVTYPER_EL0(4),
1029 	PMU_PMEVTYPER_EL0(5),
1030 	PMU_PMEVTYPER_EL0(6),
1031 	PMU_PMEVTYPER_EL0(7),
1032 	PMU_PMEVTYPER_EL0(8),
1033 	PMU_PMEVTYPER_EL0(9),
1034 	PMU_PMEVTYPER_EL0(10),
1035 	PMU_PMEVTYPER_EL0(11),
1036 	PMU_PMEVTYPER_EL0(12),
1037 	PMU_PMEVTYPER_EL0(13),
1038 	PMU_PMEVTYPER_EL0(14),
1039 	PMU_PMEVTYPER_EL0(15),
1040 	PMU_PMEVTYPER_EL0(16),
1041 	PMU_PMEVTYPER_EL0(17),
1042 	PMU_PMEVTYPER_EL0(18),
1043 	PMU_PMEVTYPER_EL0(19),
1044 	PMU_PMEVTYPER_EL0(20),
1045 	PMU_PMEVTYPER_EL0(21),
1046 	PMU_PMEVTYPER_EL0(22),
1047 	PMU_PMEVTYPER_EL0(23),
1048 	PMU_PMEVTYPER_EL0(24),
1049 	PMU_PMEVTYPER_EL0(25),
1050 	PMU_PMEVTYPER_EL0(26),
1051 	PMU_PMEVTYPER_EL0(27),
1052 	PMU_PMEVTYPER_EL0(28),
1053 	PMU_PMEVTYPER_EL0(29),
1054 	PMU_PMEVTYPER_EL0(30),
1055 	/*
1056 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
1057 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
1058 	 */
1059 	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
1060 
1061 	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1062 	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1063 	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
1064 };
1065 
1066 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1067 			struct sys_reg_params *p,
1068 			const struct sys_reg_desc *r)
1069 {
1070 	if (p->is_write) {
1071 		return ignore_write(vcpu, p);
1072 	} else {
1073 		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1074 		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1075 		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
1076 
1077 		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
1078 			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
1079 			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
1080 			     | (6 << 16) | (el3 << 14) | (el3 << 12));
1081 		return true;
1082 	}
1083 }
1084 
1085 static bool trap_debug32(struct kvm_vcpu *vcpu,
1086 			 struct sys_reg_params *p,
1087 			 const struct sys_reg_desc *r)
1088 {
1089 	if (p->is_write) {
1090 		vcpu_cp14(vcpu, r->reg) = p->regval;
1091 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
1092 	} else {
1093 		p->regval = vcpu_cp14(vcpu, r->reg);
1094 	}
1095 
1096 	return true;
1097 }
1098 
1099 /* AArch32 debug register mappings
1100  *
1101  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
1102  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
1103  *
1104  * All control registers and watchpoint value registers are mapped to
1105  * the lower 32 bits of their AArch64 equivalents. We share the trap
1106  * handlers with the above AArch64 code which checks what mode the
1107  * system is in.
1108  */
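/*
 * For instance, an AArch32 write to DBGBXVR1 is handled by trap_xvr()
 * below and replaces bits [63:32] of DBGBVR1_EL1, while a read returns
 * those upper 32 bits; the lower half is reached through the normal
 * DBGBVR1 mapping handled by trap_bvr().
 */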
1109 
1110 static bool trap_xvr(struct kvm_vcpu *vcpu,
1111 		     struct sys_reg_params *p,
1112 		     const struct sys_reg_desc *rd)
1113 {
1114 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
1115 
1116 	if (p->is_write) {
1117 		u64 val = *dbg_reg;
1118 
1119 		val &= 0xffffffffUL;
1120 		val |= p->regval << 32;
1121 		*dbg_reg = val;
1122 
1123 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
1124 	} else {
1125 		p->regval = *dbg_reg >> 32;
1126 	}
1127 
1128 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
1129 
1130 	return true;
1131 }
1132 
1133 #define DBG_BCR_BVR_WCR_WVR(n)						\
1134 	/* DBGBVRn */							\
1135 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, 	\
1136 	/* DBGBCRn */							\
1137 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
1138 	/* DBGWVRn */							\
1139 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
1140 	/* DBGWCRn */							\
1141 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1142 
1143 #define DBGBXVR(n)							\
1144 	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
1145 
1146 /*
1147  * Trapped cp14 registers. We generally ignore most of the external
1148  * debug registers, on the principle that they don't really make sense
1149  * to a guest. Revisit this one day, should this principle change.
1150  */
1151 static const struct sys_reg_desc cp14_regs[] = {
1152 	/* DBGIDR */
1153 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
1154 	/* DBGDTRRXext */
1155 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
1156 
1157 	DBG_BCR_BVR_WCR_WVR(0),
1158 	/* DBGDSCRint */
1159 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
1160 	DBG_BCR_BVR_WCR_WVR(1),
1161 	/* DBGDCCINT */
1162 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
1163 	/* DBGDSCRext */
1164 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
1165 	DBG_BCR_BVR_WCR_WVR(2),
1166 	/* DBGDTR[RT]Xint */
1167 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
1168 	/* DBGDTR[RT]Xext */
1169 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
1170 	DBG_BCR_BVR_WCR_WVR(3),
1171 	DBG_BCR_BVR_WCR_WVR(4),
1172 	DBG_BCR_BVR_WCR_WVR(5),
1173 	/* DBGWFAR */
1174 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
1175 	/* DBGOSECCR */
1176 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
1177 	DBG_BCR_BVR_WCR_WVR(6),
1178 	/* DBGVCR */
1179 	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
1180 	DBG_BCR_BVR_WCR_WVR(7),
1181 	DBG_BCR_BVR_WCR_WVR(8),
1182 	DBG_BCR_BVR_WCR_WVR(9),
1183 	DBG_BCR_BVR_WCR_WVR(10),
1184 	DBG_BCR_BVR_WCR_WVR(11),
1185 	DBG_BCR_BVR_WCR_WVR(12),
1186 	DBG_BCR_BVR_WCR_WVR(13),
1187 	DBG_BCR_BVR_WCR_WVR(14),
1188 	DBG_BCR_BVR_WCR_WVR(15),
1189 
1190 	/* DBGDRAR (32bit) */
1191 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
1192 
1193 	DBGBXVR(0),
1194 	/* DBGOSLAR */
1195 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
1196 	DBGBXVR(1),
1197 	/* DBGOSLSR */
1198 	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
1199 	DBGBXVR(2),
1200 	DBGBXVR(3),
1201 	/* DBGOSDLR */
1202 	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
1203 	DBGBXVR(4),
1204 	/* DBGPRCR */
1205 	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
1206 	DBGBXVR(5),
1207 	DBGBXVR(6),
1208 	DBGBXVR(7),
1209 	DBGBXVR(8),
1210 	DBGBXVR(9),
1211 	DBGBXVR(10),
1212 	DBGBXVR(11),
1213 	DBGBXVR(12),
1214 	DBGBXVR(13),
1215 	DBGBXVR(14),
1216 	DBGBXVR(15),
1217 
1218 	/* DBGDSAR (32bit) */
1219 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
1220 
1221 	/* DBGDEVID2 */
1222 	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
1223 	/* DBGDEVID1 */
1224 	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
1225 	/* DBGDEVID */
1226 	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
1227 	/* DBGCLAIMSET */
1228 	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
1229 	/* DBGCLAIMCLR */
1230 	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
1231 	/* DBGAUTHSTATUS */
1232 	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
1233 };
1234 
1235 /* Trapped cp14 64bit registers */
1236 static const struct sys_reg_desc cp14_64_regs[] = {
1237 	/* DBGDRAR (64bit) */
1238 	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
1239 
1240 	/* DBGDSAR (64bit) */
1241 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
1242 };
1243 
1244 /* Macro to expand the PMEVCNTRn register */
1245 #define PMU_PMEVCNTR(n)							\
1246 	/* PMEVCNTRn */							\
1247 	{ Op1(0), CRn(0b1110),						\
1248 	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
1249 	  access_pmu_evcntr }
1250 
1251 /* Macro to expand the PMEVTYPERn register */
1252 #define PMU_PMEVTYPER(n)						\
1253 	/* PMEVTYPERn */						\
1254 	{ Op1(0), CRn(0b1110),						\
1255 	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
1256 	  access_pmu_evtyper }
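/*
 * As a concrete example of the arithmetic above, PMU_PMEVCNTR(10) yields
 * CRm = 0b1000 | (10 >> 3) = 0b1001 and Op2 = 10 & 0x7 = 2, i.e. the
 * c14, c9, 2 encoding of PMEVCNTR10; PMU_PMEVTYPER uses the same scheme
 * with the 0b1100 CRm base.
 */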
1257 
1258 /*
1259  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
1260  * depending on the way they are accessed (as a 32bit or a 64bit
1261  * register).
1262  */
1263 static const struct sys_reg_desc cp15_regs[] = {
1264 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
1265 
1266 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
1267 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1268 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
1269 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
1270 	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
1271 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
1272 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
1273 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
1274 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
1275 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
1276 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
1277 
1278 	/*
1279 	 * DC{C,I,CI}SW operations:
1280 	 */
1281 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
1282 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
1283 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
1284 
1285 	/* PMU */
1286 	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
1287 	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
1288 	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
1289 	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
1290 	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
1291 	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
1292 	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
1293 	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
1294 	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
1295 	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
1296 	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
1297 	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
1298 	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
1299 	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
1300 	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
1301 
1302 	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
1303 	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
1304 	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
1305 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
1306 
1307 	/* ICC_SRE */
1308 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
1309 
1310 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
1311 
1312 	/* PMEVCNTRn */
1313 	PMU_PMEVCNTR(0),
1314 	PMU_PMEVCNTR(1),
1315 	PMU_PMEVCNTR(2),
1316 	PMU_PMEVCNTR(3),
1317 	PMU_PMEVCNTR(4),
1318 	PMU_PMEVCNTR(5),
1319 	PMU_PMEVCNTR(6),
1320 	PMU_PMEVCNTR(7),
1321 	PMU_PMEVCNTR(8),
1322 	PMU_PMEVCNTR(9),
1323 	PMU_PMEVCNTR(10),
1324 	PMU_PMEVCNTR(11),
1325 	PMU_PMEVCNTR(12),
1326 	PMU_PMEVCNTR(13),
1327 	PMU_PMEVCNTR(14),
1328 	PMU_PMEVCNTR(15),
1329 	PMU_PMEVCNTR(16),
1330 	PMU_PMEVCNTR(17),
1331 	PMU_PMEVCNTR(18),
1332 	PMU_PMEVCNTR(19),
1333 	PMU_PMEVCNTR(20),
1334 	PMU_PMEVCNTR(21),
1335 	PMU_PMEVCNTR(22),
1336 	PMU_PMEVCNTR(23),
1337 	PMU_PMEVCNTR(24),
1338 	PMU_PMEVCNTR(25),
1339 	PMU_PMEVCNTR(26),
1340 	PMU_PMEVCNTR(27),
1341 	PMU_PMEVCNTR(28),
1342 	PMU_PMEVCNTR(29),
1343 	PMU_PMEVCNTR(30),
1344 	/* PMEVTYPERn */
1345 	PMU_PMEVTYPER(0),
1346 	PMU_PMEVTYPER(1),
1347 	PMU_PMEVTYPER(2),
1348 	PMU_PMEVTYPER(3),
1349 	PMU_PMEVTYPER(4),
1350 	PMU_PMEVTYPER(5),
1351 	PMU_PMEVTYPER(6),
1352 	PMU_PMEVTYPER(7),
1353 	PMU_PMEVTYPER(8),
1354 	PMU_PMEVTYPER(9),
1355 	PMU_PMEVTYPER(10),
1356 	PMU_PMEVTYPER(11),
1357 	PMU_PMEVTYPER(12),
1358 	PMU_PMEVTYPER(13),
1359 	PMU_PMEVTYPER(14),
1360 	PMU_PMEVTYPER(15),
1361 	PMU_PMEVTYPER(16),
1362 	PMU_PMEVTYPER(17),
1363 	PMU_PMEVTYPER(18),
1364 	PMU_PMEVTYPER(19),
1365 	PMU_PMEVTYPER(20),
1366 	PMU_PMEVTYPER(21),
1367 	PMU_PMEVTYPER(22),
1368 	PMU_PMEVTYPER(23),
1369 	PMU_PMEVTYPER(24),
1370 	PMU_PMEVTYPER(25),
1371 	PMU_PMEVTYPER(26),
1372 	PMU_PMEVTYPER(27),
1373 	PMU_PMEVTYPER(28),
1374 	PMU_PMEVTYPER(29),
1375 	PMU_PMEVTYPER(30),
1376 	/* PMCCFILTR */
1377 	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
1378 };
1379 
1380 static const struct sys_reg_desc cp15_64_regs[] = {
1381 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1382 	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
1383 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
1384 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
1385 };
1386 
1387 /* Target specific emulation tables */
1388 static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
1389 
1390 void kvm_register_target_sys_reg_table(unsigned int target,
1391 				       struct kvm_sys_reg_target_table *table)
1392 {
1393 	target_tables[target] = table;
1394 }
1395 
1396 /* Get specific register table for this target. */
1397 static const struct sys_reg_desc *get_target_table(unsigned target,
1398 						   bool mode_is_64,
1399 						   size_t *num)
1400 {
1401 	struct kvm_sys_reg_target_table *table;
1402 
1403 	table = target_tables[target];
1404 	if (mode_is_64) {
1405 		*num = table->table64.num;
1406 		return table->table64.table;
1407 	} else {
1408 		*num = table->table32.num;
1409 		return table->table32.table;
1410 	}
1411 }
1412 
1413 #define reg_to_match_value(x)						\
1414 	({								\
1415 		unsigned long val;					\
1416 		val  = (x)->Op0 << 14;					\
1417 		val |= (x)->Op1 << 11;					\
1418 		val |= (x)->CRn << 7;					\
1419 		val |= (x)->CRm << 3;					\
1420 		val |= (x)->Op2;					\
1421 		val;							\
1422 	 })
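/*
 * Illustration: SCTLR_EL1 (Op0 3, Op1 0, CRn 1, CRm 0, Op2 0) packs to
 * (3 << 14) | (1 << 7) = 0xc080. Because the fields are packed from Op0
 * (most significant) down to Op2 (least significant), sorting a table by
 * Op0, Op1, CRn, CRm, Op2 also sorts it by this match value, which is
 * what lets find_reg() below use bsearch().
 */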
1423 
1424 static int match_sys_reg(const void *key, const void *elt)
1425 {
1426 	const unsigned long pval = (unsigned long)key;
1427 	const struct sys_reg_desc *r = elt;
1428 
1429 	return pval - reg_to_match_value(r);
1430 }
1431 
1432 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
1433 					 const struct sys_reg_desc table[],
1434 					 unsigned int num)
1435 {
1436 	unsigned long pval = reg_to_match_value(params);
1437 
1438 	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
1439 }
1440 
1441 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
1442 {
1443 	kvm_inject_undefined(vcpu);
1444 	return 1;
1445 }
1446 
1447 static void perform_access(struct kvm_vcpu *vcpu,
1448 			   struct sys_reg_params *params,
1449 			   const struct sys_reg_desc *r)
1450 {
1451 	/*
1452 	 * Not having an accessor means that we have configured a trap
1453 	 * that we don't know how to handle. This certainly qualifies
1454 	 * as a gross bug that should be fixed right away.
1455 	 */
1456 	BUG_ON(!r->access);
1457 
1458 	/* Skip the guest instruction if the handler emulated the access */
1459 	if (likely(r->access(vcpu, params, r)))
1460 		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1461 }
1462 
1463 /*
1464  * emulate_cp -- tries to match a sys_reg access in a handling table, and
1465  *               calls the corresponding trap handler.
1466  *
1467  * @params: pointer to the descriptor of the access
1468  * @table: array of trap descriptors
1469  * @num: size of the trap descriptor array
1470  *
1471  * Return 0 if the access has been handled, and -1 if not.
1472  */
1473 static int emulate_cp(struct kvm_vcpu *vcpu,
1474 		      struct sys_reg_params *params,
1475 		      const struct sys_reg_desc *table,
1476 		      size_t num)
1477 {
1478 	const struct sys_reg_desc *r;
1479 
1480 	if (!table)
1481 		return -1;	/* Not handled */
1482 
1483 	r = find_reg(params, table, num);
1484 
1485 	if (r) {
1486 		perform_access(vcpu, params, r);
1487 		return 0;
1488 	}
1489 
1490 	/* Not handled */
1491 	return -1;
1492 }
1493 
1494 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
1495 				struct sys_reg_params *params)
1496 {
1497 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
1498 	int cp = -1;
1499 
1500 	switch(hsr_ec) {
1501 	case ESR_ELx_EC_CP15_32:
1502 	case ESR_ELx_EC_CP15_64:
1503 		cp = 15;
1504 		break;
1505 	case ESR_ELx_EC_CP14_MR:
1506 	case ESR_ELx_EC_CP14_64:
1507 		cp = 14;
1508 		break;
1509 	default:
1510 		WARN_ON(1);
1511 	}
1512 
1513 	kvm_err("Unsupported guest CP%d access at: %08lx\n",
1514 		cp, *vcpu_pc(vcpu));
1515 	print_sys_reg_instr(params);
1516 	kvm_inject_undefined(vcpu);
1517 }
1518 
1519 /**
1520  * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
1521  * @vcpu: The VCPU pointer
1522  * @run:  The kvm_run struct
1523  */
1524 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1525 			    const struct sys_reg_desc *global,
1526 			    size_t nr_global,
1527 			    const struct sys_reg_desc *target_specific,
1528 			    size_t nr_specific)
1529 {
1530 	struct sys_reg_params params;
1531 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
1532 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
1533 	int Rt2 = (hsr >> 10) & 0x1f;
1534 
1535 	params.is_aarch32 = true;
1536 	params.is_32bit = false;
1537 	params.CRm = (hsr >> 1) & 0xf;
1538 	params.is_write = ((hsr & 1) == 0);
1539 
1540 	params.Op0 = 0;
1541 	params.Op1 = (hsr >> 16) & 0xf;
1542 	params.Op2 = 0;
1543 	params.CRn = 0;
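	/*
	 * The extraction above mirrors the ISS encoding for MCRR/MRRC
	 * traps (Opc1 in bits [19:16], Rt2 in [14:10], CRm in [4:1],
	 * direction in bit [0]); Op0, Op2 and CRn have no meaning for a
	 * 64-bit coprocessor access and are simply zeroed.
	 */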
1544 
1545 	/*
1546 	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
1547 	 * backends between AArch32 and AArch64, we get away with it.
1548 	 */
1549 	if (params.is_write) {
1550 		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
1551 		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
1552 	}
1553 
1554 	/*
1555 	 * Try to emulate the coprocessor access using the target
1556 	 * specific table first, and the global table afterwards. If
1557 	 * either table contains a handler, perform the access, write the
1558 	 * result back to the guest registers in the case of a read, and
1559 	 * return with success.
1560 	 */
1561 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1562 	    !emulate_cp(vcpu, &params, global, nr_global)) {
1563 		/* Split up the value between registers for the read side */
1564 		if (!params.is_write) {
1565 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
1566 			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
1567 		}
1568 
1569 		return 1;
1570 	}
1571 
1572 	unhandled_cp_access(vcpu, &params);
1573 	return 1;
1574 }
1575 
1576 /**
1577  * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
1578  * @vcpu: The VCPU pointer
1579  * @run:  The kvm_run struct
1580  */
1581 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
1582 			    const struct sys_reg_desc *global,
1583 			    size_t nr_global,
1584 			    const struct sys_reg_desc *target_specific,
1585 			    size_t nr_specific)
1586 {
1587 	struct sys_reg_params params;
1588 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
1589 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
1590 
1591 	params.is_aarch32 = true;
1592 	params.is_32bit = true;
1593 	params.CRm = (hsr >> 1) & 0xf;
1594 	params.regval = vcpu_get_reg(vcpu, Rt);
1595 	params.is_write = ((hsr & 1) == 0);
1596 	params.CRn = (hsr >> 10) & 0xf;
1597 	params.Op0 = 0;
1598 	params.Op1 = (hsr >> 14) & 0x7;
1599 	params.Op2 = (hsr >> 17) & 0x7;
1600 
1601 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1602 	    !emulate_cp(vcpu, &params, global, nr_global)) {
1603 		if (!params.is_write)
1604 			vcpu_set_reg(vcpu, Rt, params.regval);
1605 		return 1;
1606 	}
1607 
1608 	unhandled_cp_access(vcpu, &params);
1609 	return 1;
1610 }
1611 
1612 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
1613 {
1614 	const struct sys_reg_desc *target_specific;
1615 	size_t num;
1616 
1617 	target_specific = get_target_table(vcpu->arch.target, false, &num);
1618 	return kvm_handle_cp_64(vcpu,
1619 				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
1620 				target_specific, num);
1621 }
1622 
1623 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1624 {
1625 	const struct sys_reg_desc *target_specific;
1626 	size_t num;
1627 
1628 	target_specific = get_target_table(vcpu->arch.target, false, &num);
1629 	return kvm_handle_cp_32(vcpu,
1630 				cp15_regs, ARRAY_SIZE(cp15_regs),
1631 				target_specific, num);
1632 }
1633 
1634 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
1635 {
1636 	return kvm_handle_cp_64(vcpu,
1637 				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
1638 				NULL, 0);
1639 }
1640 
1641 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1642 {
1643 	return kvm_handle_cp_32(vcpu,
1644 				cp14_regs, ARRAY_SIZE(cp14_regs),
1645 				NULL, 0);
1646 }
1647 
1648 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
1649 			   struct sys_reg_params *params)
1650 {
1651 	size_t num;
1652 	const struct sys_reg_desc *table, *r;
1653 
1654 	table = get_target_table(vcpu->arch.target, true, &num);
1655 
1656 	/* Search target-specific then generic table. */
1657 	r = find_reg(params, table, num);
1658 	if (!r)
1659 		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
1660 
1661 	if (likely(r)) {
1662 		perform_access(vcpu, params, r);
1663 	} else {
1664 		kvm_err("Unsupported guest sys_reg access at: %lx\n",
1665 			*vcpu_pc(vcpu));
1666 		print_sys_reg_instr(params);
1667 		kvm_inject_undefined(vcpu);
1668 	}
1669 	return 1;
1670 }
1671 
1672 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
1673 			      const struct sys_reg_desc *table, size_t num)
1674 {
1675 	unsigned long i;
1676 
1677 	for (i = 0; i < num; i++)
1678 		if (table[i].reset)
1679 			table[i].reset(vcpu, &table[i]);
1680 }
1681 
1682 /**
1683  * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
1684  * @vcpu: The VCPU pointer
1685  * @run:  The kvm_run struct
1686  */
1687 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
1688 {
1689 	struct sys_reg_params params;
1690 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
1691 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
1692 	int ret;
1693 
1694 	trace_kvm_handle_sys_reg(esr);
1695 
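	/*
	 * Decode the ISS of the system register trap. This follows the
	 * ESR_ELx ISS encoding for MSR/MRS traps (ARMv8 ARM): Op0 in bits
	 * [21:20], Op2 in [19:17], Op1 in [16:14], CRn in [13:10], CRm in
	 * [4:1] and the direction (0 = write) in bit [0], which is what
	 * the shifts below extract.
	 */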
1696 	params.is_aarch32 = false;
1697 	params.is_32bit = false;
1698 	params.Op0 = (esr >> 20) & 3;
1699 	params.Op1 = (esr >> 14) & 0x7;
1700 	params.CRn = (esr >> 10) & 0xf;
1701 	params.CRm = (esr >> 1) & 0xf;
1702 	params.Op2 = (esr >> 17) & 0x7;
1703 	params.regval = vcpu_get_reg(vcpu, Rt);
1704 	params.is_write = !(esr & 1);
1705 
1706 	ret = emulate_sys_reg(vcpu, &params);
1707 
1708 	if (!params.is_write)
1709 		vcpu_set_reg(vcpu, Rt, params.regval);
1710 	return ret;
1711 }
1712 
1713 /******************************************************************************
1714  * Userspace API
1715  *****************************************************************************/
1716 
1717 static bool index_to_params(u64 id, struct sys_reg_params *params)
1718 {
1719 	switch (id & KVM_REG_SIZE_MASK) {
1720 	case KVM_REG_SIZE_U64:
1721 		/* Any unused index bits mean it's not valid. */
1722 		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
1723 			      | KVM_REG_ARM_COPROC_MASK
1724 			      | KVM_REG_ARM64_SYSREG_OP0_MASK
1725 			      | KVM_REG_ARM64_SYSREG_OP1_MASK
1726 			      | KVM_REG_ARM64_SYSREG_CRN_MASK
1727 			      | KVM_REG_ARM64_SYSREG_CRM_MASK
1728 			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
1729 			return false;
1730 		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
1731 			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
1732 		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
1733 			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
1734 		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
1735 			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
1736 		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
1737 			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
1738 		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
1739 			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
1740 		return true;
1741 	default:
1742 		return false;
1743 	}
1744 }
1745 
1746 const struct sys_reg_desc *find_reg_by_id(u64 id,
1747 					  struct sys_reg_params *params,
1748 					  const struct sys_reg_desc table[],
1749 					  unsigned int num)
1750 {
1751 	if (!index_to_params(id, params))
1752 		return NULL;
1753 
1754 	return find_reg(params, table, num);
1755 }
1756 
1757 /* Decode an index value, and find the sys_reg_desc entry. */
1758 static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
1759 						    u64 id)
1760 {
1761 	size_t num;
1762 	const struct sys_reg_desc *table, *r;
1763 	struct sys_reg_params params;
1764 
1765 	/* We only do sys_reg for now. */
1766 	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
1767 		return NULL;
1768 
1769 	table = get_target_table(vcpu->arch.target, true, &num);
1770 	r = find_reg_by_id(id, &params, table, num);
1771 	if (!r)
1772 		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
1773 
1774 	/* Not saved in the sys_reg array? */
1775 	if (r && !r->reg)
1776 		r = NULL;
1777 
1778 	return r;
1779 }
1780 
1781 /*
1782  * These are the invariant sys_reg registers: we let the guest see the
1783  * host versions of these, so they're part of the guest state.
1784  *
1785  * A future CPU may provide a mechanism to present different values to
1786  * the guest, or a future kvm may trap them.
1787  */
1788 
1789 #define FUNCTION_INVARIANT(reg)						\
1790 	static void get_##reg(struct kvm_vcpu *v,			\
1791 			      const struct sys_reg_desc *r)		\
1792 	{								\
1793 		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
1794 	}
1795 
1796 FUNCTION_INVARIANT(midr_el1)
1797 FUNCTION_INVARIANT(ctr_el0)
1798 FUNCTION_INVARIANT(revidr_el1)
1799 FUNCTION_INVARIANT(id_pfr0_el1)
1800 FUNCTION_INVARIANT(id_pfr1_el1)
1801 FUNCTION_INVARIANT(id_dfr0_el1)
1802 FUNCTION_INVARIANT(id_afr0_el1)
1803 FUNCTION_INVARIANT(id_mmfr0_el1)
1804 FUNCTION_INVARIANT(id_mmfr1_el1)
1805 FUNCTION_INVARIANT(id_mmfr2_el1)
1806 FUNCTION_INVARIANT(id_mmfr3_el1)
1807 FUNCTION_INVARIANT(id_isar0_el1)
1808 FUNCTION_INVARIANT(id_isar1_el1)
1809 FUNCTION_INVARIANT(id_isar2_el1)
1810 FUNCTION_INVARIANT(id_isar3_el1)
1811 FUNCTION_INVARIANT(id_isar4_el1)
1812 FUNCTION_INVARIANT(id_isar5_el1)
1813 FUNCTION_INVARIANT(clidr_el1)
1814 FUNCTION_INVARIANT(aidr_el1)
1815 
1816 /* ->val is filled in by kvm_sys_reg_table_init() */
1817 static struct sys_reg_desc invariant_sys_regs[] = {
1818 	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
1819 	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
1820 	{ SYS_DESC(SYS_ID_PFR0_EL1), NULL, get_id_pfr0_el1 },
1821 	{ SYS_DESC(SYS_ID_PFR1_EL1), NULL, get_id_pfr1_el1 },
1822 	{ SYS_DESC(SYS_ID_DFR0_EL1), NULL, get_id_dfr0_el1 },
1823 	{ SYS_DESC(SYS_ID_AFR0_EL1), NULL, get_id_afr0_el1 },
1824 	{ SYS_DESC(SYS_ID_MMFR0_EL1), NULL, get_id_mmfr0_el1 },
1825 	{ SYS_DESC(SYS_ID_MMFR1_EL1), NULL, get_id_mmfr1_el1 },
1826 	{ SYS_DESC(SYS_ID_MMFR2_EL1), NULL, get_id_mmfr2_el1 },
1827 	{ SYS_DESC(SYS_ID_MMFR3_EL1), NULL, get_id_mmfr3_el1 },
1828 	{ SYS_DESC(SYS_ID_ISAR0_EL1), NULL, get_id_isar0_el1 },
1829 	{ SYS_DESC(SYS_ID_ISAR1_EL1), NULL, get_id_isar1_el1 },
1830 	{ SYS_DESC(SYS_ID_ISAR2_EL1), NULL, get_id_isar2_el1 },
1831 	{ SYS_DESC(SYS_ID_ISAR3_EL1), NULL, get_id_isar3_el1 },
1832 	{ SYS_DESC(SYS_ID_ISAR4_EL1), NULL, get_id_isar4_el1 },
1833 	{ SYS_DESC(SYS_ID_ISAR5_EL1), NULL, get_id_isar5_el1 },
1834 	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
1835 	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
1836 	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
1837 };
1838 
1839 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
1840 {
1841 	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
1842 		return -EFAULT;
1843 	return 0;
1844 }
1845 
1846 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
1847 {
1848 	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
1849 		return -EFAULT;
1850 	return 0;
1851 }
1852 
1853 static int get_invariant_sys_reg(u64 id, void __user *uaddr)
1854 {
1855 	struct sys_reg_params params;
1856 	const struct sys_reg_desc *r;
1857 
1858 	r = find_reg_by_id(id, &params, invariant_sys_regs,
1859 			   ARRAY_SIZE(invariant_sys_regs));
1860 	if (!r)
1861 		return -ENOENT;
1862 
1863 	return reg_to_user(uaddr, &r->val, id);
1864 }
1865 
1866 static int set_invariant_sys_reg(u64 id, void __user *uaddr)
1867 {
1868 	struct sys_reg_params params;
1869 	const struct sys_reg_desc *r;
1870 	int err;
1871 	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
1872 
1873 	r = find_reg_by_id(id, &params, invariant_sys_regs,
1874 			   ARRAY_SIZE(invariant_sys_regs));
1875 	if (!r)
1876 		return -ENOENT;
1877 
1878 	err = reg_from_user(&val, uaddr, id);
1879 	if (err)
1880 		return err;
1881 
1882 	/* This is what we mean by invariant: you can't change it. */
1883 	if (r->val != val)
1884 		return -EINVAL;
1885 
1886 	return 0;
1887 }
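/*
 * Note (illustrative summary): from userspace, KVM_GET_ONE_REG on the ids
 * above returns the host's value, and KVM_SET_ONE_REG only succeeds when
 * that exact value is written back; restoring a saved VM on a host whose
 * invariant registers differ therefore fails with -EINVAL.
 */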
1888 
1889 static bool is_valid_cache(u32 val)
1890 {
1891 	u32 level, ctype;
1892 
1893 	if (val >= CSSELR_MAX)
1894 		return false;
1895 
1896 	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
1897 	level = (val >> 1);
1898 	ctype = (cache_levels >> (level * 3)) & 7;
1899 
1900 	switch (ctype) {
1901 	case 0: /* No cache */
1902 		return false;
1903 	case 1: /* Instruction cache only */
1904 		return (val & 1);
1905 	case 2: /* Data cache only */
1906 	case 4: /* Unified cache */
1907 		return !(val & 1);
1908 	case 3: /* Separate instruction and data caches */
1909 		return true;
1910 	default: /* Reserved: we can't know instruction or data. */
1911 		return false;
1912 	}
1913 }
1914 
1915 static int demux_c15_get(u64 id, void __user *uaddr)
1916 {
1917 	u32 val;
1918 	u32 __user *uval = uaddr;
1919 
1920 	/* Fail if we have unknown bits set. */
1921 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
1922 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
1923 		return -ENOENT;
1924 
1925 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
1926 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
1927 		if (KVM_REG_SIZE(id) != 4)
1928 			return -ENOENT;
1929 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
1930 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
1931 		if (!is_valid_cache(val))
1932 			return -ENOENT;
1933 
1934 		return put_user(get_ccsidr(val), uval);
1935 	default:
1936 		return -ENOENT;
1937 	}
1938 }
1939 
1940 static int demux_c15_set(u64 id, void __user *uaddr)
1941 {
1942 	u32 val, newval;
1943 	u32 __user *uval = uaddr;
1944 
1945 	/* Fail if we have unknown bits set. */
1946 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
1947 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
1948 		return -ENOENT;
1949 
1950 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
1951 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
1952 		if (KVM_REG_SIZE(id) != 4)
1953 			return -ENOENT;
1954 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
1955 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
1956 		if (!is_valid_cache(val))
1957 			return -ENOENT;
1958 
1959 		if (get_user(newval, uval))
1960 			return -EFAULT;
1961 
1962 		/* This is also invariant: you can't change it. */
1963 		if (newval != get_ccsidr(val))
1964 			return -EINVAL;
1965 		return 0;
1966 	default:
1967 		return -ENOENT;
1968 	}
1969 }
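/*
 * Illustrative sketch (not part of the original file): the id layout that
 * demux_c15_get()/demux_c15_set() expect. The helper name and userspace
 * types are assumptions; the macros come from the uapi headers. csselr
 * follows CSSELR_EL1: bit 0 selects instruction (1) or data/unified (0),
 * bits [3:1] encode the level, 0 meaning L1. Note the U32 size: the addr
 * passed to KVM_GET_ONE_REG must point at a 32-bit value.
 */
#include <stdint.h>
#include <linux/kvm.h>

static uint64_t ccsidr_index(uint32_t csselr)
{
	return KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
	       KVM_REG_ARM_DEMUX_ID_CCSIDR |
	       ((uint64_t)csselr << KVM_REG_ARM_DEMUX_VAL_SHIFT);
}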
1970 
1971 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1972 {
1973 	const struct sys_reg_desc *r;
1974 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
1975 
1976 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1977 		return demux_c15_get(reg->id, uaddr);
1978 
1979 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
1980 		return -ENOENT;
1981 
1982 	r = index_to_sys_reg_desc(vcpu, reg->id);
1983 	if (!r)
1984 		return get_invariant_sys_reg(reg->id, uaddr);
1985 
1986 	if (r->get_user)
1987 		return (r->get_user)(vcpu, r, reg, uaddr);
1988 
1989 	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
1990 }
1991 
1992 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1993 {
1994 	const struct sys_reg_desc *r;
1995 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
1996 
1997 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1998 		return demux_c15_set(reg->id, uaddr);
1999 
2000 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2001 		return -ENOENT;
2002 
2003 	r = index_to_sys_reg_desc(vcpu, reg->id);
2004 	if (!r)
2005 		return set_invariant_sys_reg(reg->id, uaddr);
2006 
2007 	if (r->set_user)
2008 		return (r->set_user)(vcpu, r, reg, uaddr);
2009 
2010 	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
2011 }
2012 
2013 static unsigned int num_demux_regs(void)
2014 {
2015 	unsigned int i, count = 0;
2016 
2017 	for (i = 0; i < CSSELR_MAX; i++)
2018 		if (is_valid_cache(i))
2019 			count++;
2020 
2021 	return count;
2022 }
2023 
2024 static int write_demux_regids(u64 __user *uindices)
2025 {
2026 	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2027 	unsigned int i;
2028 
2029 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2030 	for (i = 0; i < CSSELR_MAX; i++) {
2031 		if (!is_valid_cache(i))
2032 			continue;
2033 		if (put_user(val | i, uindices))
2034 			return -EFAULT;
2035 		uindices++;
2036 	}
2037 	return 0;
2038 }
2039 
2040 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2041 {
2042 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2043 		KVM_REG_ARM64_SYSREG |
2044 		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2045 		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2046 		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2047 		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2048 		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2049 }
2050 
2051 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2052 {
2053 	if (!*uind)
2054 		return true;
2055 
2056 	if (put_user(sys_reg_to_index(reg), *uind))
2057 		return false;
2058 
2059 	(*uind)++;
2060 	return true;
2061 }
2062 
2063 /* Assumes ordered tables; see kvm_sys_reg_table_init. */
2064 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
2065 {
2066 	const struct sys_reg_desc *i1, *i2, *end1, *end2;
2067 	unsigned int total = 0;
2068 	size_t num;
2069 
2070 	/* We check for duplicates here, to allow arch-specific overrides. */
2071 	i1 = get_target_table(vcpu->arch.target, true, &num);
2072 	end1 = i1 + num;
2073 	i2 = sys_reg_descs;
2074 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
2075 
2076 	BUG_ON(i1 == end1 || i2 == end2);
2077 
2078 	/* Walk carefully, as both tables may refer to the same register. */
2079 	while (i1 || i2) {
2080 		int cmp = cmp_sys_reg(i1, i2);
2081 		/* A target-specific entry overrides the generic one. */
2082 		if (cmp <= 0) {
2083 			/* Ignore registers we trap but don't save. */
2084 			if (i1->reg) {
2085 				if (!copy_reg_to_user(i1, &uind))
2086 					return -EFAULT;
2087 				total++;
2088 			}
2089 		} else {
2090 			/* Ignore registers we trap but don't save. */
2091 			if (i2->reg) {
2092 				if (!copy_reg_to_user(i2, &uind))
2093 					return -EFAULT;
2094 				total++;
2095 			}
2096 		}
2097 
2098 		if (cmp <= 0 && ++i1 == end1)
2099 			i1 = NULL;
2100 		if (cmp >= 0 && ++i2 == end2)
2101 			i2 = NULL;
2102 	}
2103 	return total;
2104 }
2105 
2106 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2107 {
2108 	return ARRAY_SIZE(invariant_sys_regs)
2109 		+ num_demux_regs()
2110 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
2111 }
2112 
2113 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2114 {
2115 	unsigned int i;
2116 	int err;
2117 
2118 	/* Give them all the invariant registers' indices first. */
2119 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2120 		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2121 			return -EFAULT;
2122 		uindices++;
2123 	}
2124 
2125 	err = walk_sys_regs(vcpu, uindices);
2126 	if (err < 0)
2127 		return err;
2128 	uindices += err;
2129 
2130 	return write_demux_regids(uindices);
2131 }
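/*
 * Illustrative sketch (not part of the original file): the userspace
 * counterpart of the enumeration above, via KVM_GET_REG_LIST. The first
 * ioctl fails with E2BIG but fills in the required count, so the call is
 * made twice. Error handling is trimmed and vcpu_fd is an assumption made
 * for the example.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	/* Probe for the number of registers (core regs, sys_regs, demux, ...). */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return NULL;

	list->n = probe.n;
	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);

	return list;	/* list->reg[0..n-1] holds the register indices */
}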
2132 
2133 static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
2134 {
2135 	unsigned int i;
2136 
2137 	for (i = 1; i < n; i++) {
2138 		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2139 			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2140 			return 1;
2141 		}
2142 	}
2143 
2144 	return 0;
2145 }
2146 
2147 void kvm_sys_reg_table_init(void)
2148 {
2149 	unsigned int i;
2150 	struct sys_reg_desc clidr;
2151 
2152 	/* Make sure tables are unique and in order. */
2153 	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
2154 	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
2155 	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
2156 	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
2157 	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
2158 	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
2159 
2160 	/* We abuse the reset function to overwrite the table itself. */
2161 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
2162 		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
2163 
2164 	/*
2165 	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
2166 	 *
2167 	 *   If software reads the Cache Type fields from Ctype1
2168 	 *   upwards, once it has seen a value of 0b000, no caches
2169 	 *   exist at further-out levels of the hierarchy. So, for
2170 	 *   example, if Ctype3 is the first Cache Type field with a
2171 	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
2172 	 *   ignored.
2173 	 */
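	/*
	 * Worked example (illustrative numbers): a CPU with separate L1
	 * I/D caches (Ctype1 = 0b011) and a unified L2 (Ctype2 = 0b100)
	 * yields cache_levels = 0b100011; the loop below stops at i = 2,
	 * the mask keeps those two fields, and is_valid_cache() then
	 * accepts CSSELR values 0 (L1 D), 1 (L1 I) and 2 (L2), so
	 * num_demux_regs() reports three CCSIDRs.
	 */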
2174 	get_clidr_el1(NULL, &clidr); /* Ugly... */
2175 	cache_levels = clidr.val;
2176 	for (i = 0; i < 7; i++)
2177 		if (((cache_levels >> (i*3)) & 7) == 0)
2178 			break;
2179 	/* Clear all higher bits. */
2180 	cache_levels &= (1 << (i*3))-1;
2181 }
2182 
2183 /**
2184  * kvm_reset_sys_regs - sets system registers to their reset values
2185  * @vcpu: The VCPU pointer
2186  *
2187  * This function finds the right table above and sets the registers on the
2188  * virtual CPU struct to their architecturally defined reset values.
2189  */
2190 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2191 {
2192 	size_t num;
2193 	const struct sys_reg_desc *table;
2194 
2195 	/* Catch anyone adding a register without putting in a reset entry. */
2196 	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
2197 
2198 	/* Generic chip reset first (so target could override). */
2199 	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2200 
2201 	table = get_target_table(vcpu->arch.target, true, &num);
2202 	reset_sys_reg_descs(vcpu, table, num);
2203 
2204 	for (num = 1; num < NR_SYS_REGS; num++)
2205 		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
2206 			panic("Didn't reset vcpu_sys_reg(%zi)", num);
2207 }
2208