xref: /openbmc/linux/arch/arm64/kvm/sys_regs.c (revision b35565bb)
1 /*
2  * Copyright (C) 2012,2013 - ARM Ltd
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * Derived from arch/arm/kvm/coproc.c:
6  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7  * Authors: Rusty Russell <rusty@rustcorp.com.au>
8  *          Christoffer Dall <c.dall@virtualopensystems.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License, version 2, as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
21  */
22 
23 #include <linux/bsearch.h>
24 #include <linux/kvm_host.h>
25 #include <linux/mm.h>
26 #include <linux/uaccess.h>
27 
28 #include <asm/cacheflush.h>
29 #include <asm/cputype.h>
30 #include <asm/debug-monitors.h>
31 #include <asm/esr.h>
32 #include <asm/kvm_arm.h>
33 #include <asm/kvm_asm.h>
34 #include <asm/kvm_coproc.h>
35 #include <asm/kvm_emulate.h>
36 #include <asm/kvm_host.h>
37 #include <asm/kvm_mmu.h>
38 #include <asm/perf_event.h>
39 #include <asm/sysreg.h>
40 
41 #include <trace/events/kvm.h>
42 
43 #include "sys_regs.h"
44 
45 #include "trace.h"
46 
47 /*
48  * All of this file is extremely similar to the ARM coproc.c, but the
49  * types are different. My gut feeling is that it should be pretty
50  * easy to merge, but that would be an ABI breakage -- again. VFP
51  * would also need to be abstracted.
52  *
53  * For AArch32, we only take care of what is being trapped. Anything
54  * that has to do with init and userspace access has to go via the
55  * 64bit interface.
56  */
57 
58 static bool read_from_write_only(struct kvm_vcpu *vcpu,
59 				 struct sys_reg_params *params,
60 				 const struct sys_reg_desc *r)
61 {
62 	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
63 	print_sys_reg_instr(params);
64 	kvm_inject_undefined(vcpu);
65 	return false;
66 }
67 
68 static bool write_to_read_only(struct kvm_vcpu *vcpu,
69 			       struct sys_reg_params *params,
70 			       const struct sys_reg_desc *r)
71 {
72 	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
73 	print_sys_reg_instr(params);
74 	kvm_inject_undefined(vcpu);
75 	return false;
76 }
77 
78 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
79 static u32 cache_levels;
80 
81 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
82 #define CSSELR_MAX 12
83 
84 /* Which cache CCSIDR represents depends on CSSELR value. */
85 static u32 get_ccsidr(u32 csselr)
86 {
87 	u32 ccsidr;
88 
89 	/* Make sure no one else changes CSSELR during this! */
90 	local_irq_disable();
91 	write_sysreg(csselr, csselr_el1);
92 	isb();
93 	ccsidr = read_sysreg(ccsidr_el1);
94 	local_irq_enable();
95 
96 	return ccsidr;
97 }
98 
99 /*
100  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
101  */
102 static bool access_dcsw(struct kvm_vcpu *vcpu,
103 			struct sys_reg_params *p,
104 			const struct sys_reg_desc *r)
105 {
106 	if (!p->is_write)
107 		return read_from_write_only(vcpu, p, r);
108 
109 	kvm_set_way_flush(vcpu);
110 	return true;
111 }
112 
113 /*
114  * Generic accessor for VM registers. Only called as long as HCR_TVM
115  * is set. If the guest enables the MMU, we stop trapping the VM
116  * sys_regs and leave it in complete control of the caches.
117  */
118 static bool access_vm_reg(struct kvm_vcpu *vcpu,
119 			  struct sys_reg_params *p,
120 			  const struct sys_reg_desc *r)
121 {
122 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
123 
124 	BUG_ON(!p->is_write);
125 
126 	if (!p->is_aarch32) {
127 		vcpu_sys_reg(vcpu, r->reg) = p->regval;
128 	} else {
129 		if (!p->is_32bit)
130 			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
131 		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
132 	}
133 
134 	kvm_toggle_cache(vcpu, was_enabled);
135 	return true;
136 }
137 
138 /*
139  * Trap handler for the GICv3 SGI generation system register.
140  * Forward the request to the VGIC emulation.
141  * The cp15_64 code makes sure this automatically works
142  * for both AArch64 and AArch32 accesses.
143  */
144 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
145 			   struct sys_reg_params *p,
146 			   const struct sys_reg_desc *r)
147 {
148 	if (!p->is_write)
149 		return read_from_write_only(vcpu, p, r);
150 
151 	vgic_v3_dispatch_sgi(vcpu, p->regval);
152 
153 	return true;
154 }
155 
156 static bool access_gic_sre(struct kvm_vcpu *vcpu,
157 			   struct sys_reg_params *p,
158 			   const struct sys_reg_desc *r)
159 {
160 	if (p->is_write)
161 		return ignore_write(vcpu, p);
162 
163 	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
164 	return true;
165 }
166 
167 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
168 			struct sys_reg_params *p,
169 			const struct sys_reg_desc *r)
170 {
171 	if (p->is_write)
172 		return ignore_write(vcpu, p);
173 	else
174 		return read_zero(vcpu, p);
175 }
176 
177 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
178 			   struct sys_reg_params *p,
179 			   const struct sys_reg_desc *r)
180 {
181 	if (p->is_write) {
182 		return ignore_write(vcpu, p);
183 	} else {
184 		p->regval = (1 << 3);
185 		return true;
186 	}
187 }
188 
189 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
190 				   struct sys_reg_params *p,
191 				   const struct sys_reg_desc *r)
192 {
193 	if (p->is_write) {
194 		return ignore_write(vcpu, p);
195 	} else {
196 		p->regval = read_sysreg(dbgauthstatus_el1);
197 		return true;
198 	}
199 }
200 
201 /*
202  * We want to avoid world-switching all the DBG registers all the
203  * time:
204  *
205  * - If we've touched any debug register, it is likely that we're
206  *   going to touch more of them. It then makes sense to disable the
207  *   traps and start doing the save/restore dance
208  * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
209  *   then mandatory to save/restore the registers, as the guest
210  *   depends on them.
211  *
212  * For this, we use a DIRTY bit, indicating the guest has modified the
213  * debug registers, used as follows:
214  *
215  * On guest entry:
216  * - If the dirty bit is set (because we're coming back from trapping),
217  *   disable the traps, save host registers, restore guest registers.
218  * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
219  *   set the dirty bit, disable the traps, save host registers,
220  *   restore guest registers.
221  * - Otherwise, enable the traps
222  *
223  * On guest exit:
224  * - If the dirty bit is set, save guest registers, restore host
225  *   registers and clear the dirty bit. This ensures that the host can
226  *   now use the debug registers.
227  */
228 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
229 			    struct sys_reg_params *p,
230 			    const struct sys_reg_desc *r)
231 {
232 	if (p->is_write) {
233 		vcpu_sys_reg(vcpu, r->reg) = p->regval;
234 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
235 	} else {
236 		p->regval = vcpu_sys_reg(vcpu, r->reg);
237 	}
238 
239 	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
240 
241 	return true;
242 }
243 
244 /*
245  * reg_to_dbg/dbg_to_reg
246  *
247  * A 32-bit write to a debug register leaves the top bits alone.
248  * A 32-bit read from a debug register only returns the bottom bits.
249  *
250  * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
251  * hyp.S code switches between host and guest values in future.
252  */
253 static void reg_to_dbg(struct kvm_vcpu *vcpu,
254 		       struct sys_reg_params *p,
255 		       u64 *dbg_reg)
256 {
257 	u64 val = p->regval;
258 
259 	if (p->is_32bit) {
260 		val &= 0xffffffffUL;
261 		val |= ((*dbg_reg >> 32) << 32);
262 	}
263 
264 	*dbg_reg = val;
265 	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
266 }
267 
268 static void dbg_to_reg(struct kvm_vcpu *vcpu,
269 		       struct sys_reg_params *p,
270 		       u64 *dbg_reg)
271 {
272 	p->regval = *dbg_reg;
273 	if (p->is_32bit)
274 		p->regval &= 0xffffffffUL;
275 }
276 
277 static bool trap_bvr(struct kvm_vcpu *vcpu,
278 		     struct sys_reg_params *p,
279 		     const struct sys_reg_desc *rd)
280 {
281 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
282 
283 	if (p->is_write)
284 		reg_to_dbg(vcpu, p, dbg_reg);
285 	else
286 		dbg_to_reg(vcpu, p, dbg_reg);
287 
288 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
289 
290 	return true;
291 }
292 
293 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
294 		const struct kvm_one_reg *reg, void __user *uaddr)
295 {
296 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
297 
298 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
299 		return -EFAULT;
300 	return 0;
301 }
302 
303 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
304 	const struct kvm_one_reg *reg, void __user *uaddr)
305 {
306 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
307 
308 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
309 		return -EFAULT;
310 	return 0;
311 }
312 
313 static void reset_bvr(struct kvm_vcpu *vcpu,
314 		      const struct sys_reg_desc *rd)
315 {
316 	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
317 }
318 
319 static bool trap_bcr(struct kvm_vcpu *vcpu,
320 		     struct sys_reg_params *p,
321 		     const struct sys_reg_desc *rd)
322 {
323 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
324 
325 	if (p->is_write)
326 		reg_to_dbg(vcpu, p, dbg_reg);
327 	else
328 		dbg_to_reg(vcpu, p, dbg_reg);
329 
330 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
331 
332 	return true;
333 }
334 
335 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
336 		const struct kvm_one_reg *reg, void __user *uaddr)
337 {
338 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
339 
340 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
341 		return -EFAULT;
342 
343 	return 0;
344 }
345 
346 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
347 	const struct kvm_one_reg *reg, void __user *uaddr)
348 {
349 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
350 
351 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
352 		return -EFAULT;
353 	return 0;
354 }
355 
356 static void reset_bcr(struct kvm_vcpu *vcpu,
357 		      const struct sys_reg_desc *rd)
358 {
359 	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
360 }
361 
362 static bool trap_wvr(struct kvm_vcpu *vcpu,
363 		     struct sys_reg_params *p,
364 		     const struct sys_reg_desc *rd)
365 {
366 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
367 
368 	if (p->is_write)
369 		reg_to_dbg(vcpu, p, dbg_reg);
370 	else
371 		dbg_to_reg(vcpu, p, dbg_reg);
372 
373 	trace_trap_reg(__func__, rd->reg, p->is_write,
374 		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
375 
376 	return true;
377 }
378 
379 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
380 		const struct kvm_one_reg *reg, void __user *uaddr)
381 {
382 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
383 
384 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
385 		return -EFAULT;
386 	return 0;
387 }
388 
389 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
390 	const struct kvm_one_reg *reg, void __user *uaddr)
391 {
392 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
393 
394 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
395 		return -EFAULT;
396 	return 0;
397 }
398 
399 static void reset_wvr(struct kvm_vcpu *vcpu,
400 		      const struct sys_reg_desc *rd)
401 {
402 	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
403 }
404 
405 static bool trap_wcr(struct kvm_vcpu *vcpu,
406 		     struct sys_reg_params *p,
407 		     const struct sys_reg_desc *rd)
408 {
409 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
410 
411 	if (p->is_write)
412 		reg_to_dbg(vcpu, p, dbg_reg);
413 	else
414 		dbg_to_reg(vcpu, p, dbg_reg);
415 
416 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
417 
418 	return true;
419 }
420 
421 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
422 		const struct kvm_one_reg *reg, void __user *uaddr)
423 {
424 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
425 
426 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
427 		return -EFAULT;
428 	return 0;
429 }
430 
431 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
432 	const struct kvm_one_reg *reg, void __user *uaddr)
433 {
434 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
435 
436 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
437 		return -EFAULT;
438 	return 0;
439 }
440 
441 static void reset_wcr(struct kvm_vcpu *vcpu,
442 		      const struct sys_reg_desc *rd)
443 {
444 	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
445 }
446 
447 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
448 {
449 	vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
450 }
451 
452 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
453 {
454 	u64 mpidr;
455 
456 	/*
457 	 * Map the vcpu_id into the first three affinity level fields of
458 	 * the MPIDR. We limit the number of VCPUs in affinity level 0 to
459 	 * 16, since the ICC_SGIxR registers of the GICv3 can only address
460 	 * 16 CPUs at that level when addressing each CPU directly for
461 	 * sending IPIs.
462 	 */
463 	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
464 	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
465 	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
466 	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
467 }
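
/*
 * Worked example (illustrative, using the shifts above): a vcpu_id of
 * 0x123 would yield Aff0 = 0x3, Aff1 = 0x12 and Aff2 = 0x0, with bit 31
 * of MPIDR_EL1 always set by the final assignment.
 */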
468 
469 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
470 {
471 	u64 pmcr, val;
472 
473 	pmcr = read_sysreg(pmcr_el0);
474 	/*
475 	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
476 	 * except PMCR.E resetting to zero.
477 	 */
478 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
479 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
480 	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
481 }
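
/*
 * Note (illustrative): 0xdecafbad above appears to be an arbitrary bit
 * pattern standing in for the UNKNOWN reset value of the writable
 * PMCR_EL0 bits; only PMCR_EL0.E (bit 0) is deliberately forced to zero
 * via ~ARMV8_PMU_PMCR_E.
 */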
482 
483 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
484 {
485 	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
486 	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
487 
488 	if (!enabled)
489 		kvm_inject_undefined(vcpu);
490 
491 	return !enabled;
492 }
493 
494 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
495 {
496 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
497 }
498 
499 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
500 {
501 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
502 }
503 
504 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
505 {
506 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
507 }
508 
509 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
510 {
511 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
512 }
513 
514 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
515 			const struct sys_reg_desc *r)
516 {
517 	u64 val;
518 
519 	if (!kvm_arm_pmu_v3_ready(vcpu))
520 		return trap_raz_wi(vcpu, p, r);
521 
522 	if (pmu_access_el0_disabled(vcpu))
523 		return false;
524 
525 	if (p->is_write) {
526 		/* Only update writeable bits of PMCR */
527 		val = vcpu_sys_reg(vcpu, PMCR_EL0);
528 		val &= ~ARMV8_PMU_PMCR_MASK;
529 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
530 		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
531 		kvm_pmu_handle_pmcr(vcpu, val);
532 	} else {
533 		/* PMCR.P & PMCR.C are RAZ */
534 		val = vcpu_sys_reg(vcpu, PMCR_EL0)
535 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
536 		p->regval = val;
537 	}
538 
539 	return true;
540 }
541 
542 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
543 			  const struct sys_reg_desc *r)
544 {
545 	if (!kvm_arm_pmu_v3_ready(vcpu))
546 		return trap_raz_wi(vcpu, p, r);
547 
548 	if (pmu_access_event_counter_el0_disabled(vcpu))
549 		return false;
550 
551 	if (p->is_write)
552 		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
553 	else
554 		/* return PMSELR.SEL field */
555 		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
556 			    & ARMV8_PMU_COUNTER_MASK;
557 
558 	return true;
559 }
560 
561 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
562 			  const struct sys_reg_desc *r)
563 {
564 	u64 pmceid;
565 
566 	if (!kvm_arm_pmu_v3_ready(vcpu))
567 		return trap_raz_wi(vcpu, p, r);
568 
569 	BUG_ON(p->is_write);
570 
571 	if (pmu_access_el0_disabled(vcpu))
572 		return false;
573 
574 	if (!(p->Op2 & 1))
575 		pmceid = read_sysreg(pmceid0_el0);
576 	else
577 		pmceid = read_sysreg(pmceid1_el0);
578 
579 	p->regval = pmceid;
580 
581 	return true;
582 }
583 
584 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
585 {
586 	u64 pmcr, val;
587 
588 	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
589 	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
590 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
591 		kvm_inject_undefined(vcpu);
592 		return false;
593 	}
594 
595 	return true;
596 }
597 
598 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
599 			      struct sys_reg_params *p,
600 			      const struct sys_reg_desc *r)
601 {
602 	u64 idx;
603 
604 	if (!kvm_arm_pmu_v3_ready(vcpu))
605 		return trap_raz_wi(vcpu, p, r);
606 
607 	if (r->CRn == 9 && r->CRm == 13) {
608 		if (r->Op2 == 2) {
609 			/* PMXEVCNTR_EL0 */
610 			if (pmu_access_event_counter_el0_disabled(vcpu))
611 				return false;
612 
613 			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
614 			      & ARMV8_PMU_COUNTER_MASK;
615 		} else if (r->Op2 == 0) {
616 			/* PMCCNTR_EL0 */
617 			if (pmu_access_cycle_counter_el0_disabled(vcpu))
618 				return false;
619 
620 			idx = ARMV8_PMU_CYCLE_IDX;
621 		} else {
622 			return false;
623 		}
624 	} else if (r->CRn == 0 && r->CRm == 9) {
625 		/* PMCCNTR */
626 		if (pmu_access_event_counter_el0_disabled(vcpu))
627 			return false;
628 
629 		idx = ARMV8_PMU_CYCLE_IDX;
630 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
631 		/* PMEVCNTRn_EL0 */
632 		if (pmu_access_event_counter_el0_disabled(vcpu))
633 			return false;
634 
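		/*
		 * Illustrative: the counter index comes straight from the
		 * encoding, e.g. PMEVCNTR10_EL0 (CRm = 0b1001, Op2 = 2)
		 * gives idx = (1 << 3) | 2 = 10.
		 */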
635 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
636 	} else {
637 		return false;
638 	}
639 
640 	if (!pmu_counter_idx_valid(vcpu, idx))
641 		return false;
642 
643 	if (p->is_write) {
644 		if (pmu_access_el0_disabled(vcpu))
645 			return false;
646 
647 		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
648 	} else {
649 		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
650 	}
651 
652 	return true;
653 }
654 
655 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
656 			       const struct sys_reg_desc *r)
657 {
658 	u64 idx, reg;
659 
660 	if (!kvm_arm_pmu_v3_ready(vcpu))
661 		return trap_raz_wi(vcpu, p, r);
662 
663 	if (pmu_access_el0_disabled(vcpu))
664 		return false;
665 
666 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
667 		/* PMXEVTYPER_EL0 */
668 		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
669 		reg = PMEVTYPER0_EL0 + idx;
670 	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
671 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
672 		if (idx == ARMV8_PMU_CYCLE_IDX)
673 			reg = PMCCFILTR_EL0;
674 		else
675 			/* PMEVTYPERn_EL0 */
676 			reg = PMEVTYPER0_EL0 + idx;
677 	} else {
678 		BUG();
679 	}
680 
681 	if (!pmu_counter_idx_valid(vcpu, idx))
682 		return false;
683 
684 	if (p->is_write) {
685 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
686 		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
687 	} else {
688 		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
689 	}
690 
691 	return true;
692 }
693 
694 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
695 			   const struct sys_reg_desc *r)
696 {
697 	u64 val, mask;
698 
699 	if (!kvm_arm_pmu_v3_ready(vcpu))
700 		return trap_raz_wi(vcpu, p, r);
701 
702 	if (pmu_access_el0_disabled(vcpu))
703 		return false;
704 
705 	mask = kvm_pmu_valid_counter_mask(vcpu);
706 	if (p->is_write) {
707 		val = p->regval & mask;
708 		if (r->Op2 & 0x1) {
709 			/* accessing PMCNTENSET_EL0 */
710 			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
711 			kvm_pmu_enable_counter(vcpu, val);
712 		} else {
713 			/* accessing PMCNTENCLR_EL0 */
714 			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
715 			kvm_pmu_disable_counter(vcpu, val);
716 		}
717 	} else {
718 		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
719 	}
720 
721 	return true;
722 }
723 
724 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
725 			   const struct sys_reg_desc *r)
726 {
727 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
728 
729 	if (!kvm_arm_pmu_v3_ready(vcpu))
730 		return trap_raz_wi(vcpu, p, r);
731 
732 	if (!vcpu_mode_priv(vcpu)) {
733 		kvm_inject_undefined(vcpu);
734 		return false;
735 	}
736 
737 	if (p->is_write) {
738 		u64 val = p->regval & mask;
739 
740 		if (r->Op2 & 0x1)
741 			/* accessing PMINTENSET_EL1 */
742 			vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
743 		else
744 			/* accessing PMINTENCLR_EL1 */
745 			vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
746 	} else {
747 		p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
748 	}
749 
750 	return true;
751 }
752 
753 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
754 			 const struct sys_reg_desc *r)
755 {
756 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
757 
758 	if (!kvm_arm_pmu_v3_ready(vcpu))
759 		return trap_raz_wi(vcpu, p, r);
760 
761 	if (pmu_access_el0_disabled(vcpu))
762 		return false;
763 
764 	if (p->is_write) {
765 		if (r->CRm & 0x2)
766 			/* accessing PMOVSSET_EL0 */
767 			vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
768 		else
769 			/* accessing PMOVSCLR_EL0 */
770 			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
771 	} else {
772 		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
773 	}
774 
775 	return true;
776 }
777 
778 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
779 			   const struct sys_reg_desc *r)
780 {
781 	u64 mask;
782 
783 	if (!kvm_arm_pmu_v3_ready(vcpu))
784 		return trap_raz_wi(vcpu, p, r);
785 
786 	if (!p->is_write)
787 		return read_from_write_only(vcpu, p, r);
788 
789 	if (pmu_write_swinc_el0_disabled(vcpu))
790 		return false;
791 
792 	mask = kvm_pmu_valid_counter_mask(vcpu);
793 	kvm_pmu_software_increment(vcpu, p->regval & mask);
794 	return true;
795 }
796 
797 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
798 			     const struct sys_reg_desc *r)
799 {
800 	if (!kvm_arm_pmu_v3_ready(vcpu))
801 		return trap_raz_wi(vcpu, p, r);
802 
803 	if (p->is_write) {
804 		if (!vcpu_mode_priv(vcpu)) {
805 			kvm_inject_undefined(vcpu);
806 			return false;
807 		}
808 
809 		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
810 						    & ARMV8_PMU_USERENR_MASK;
811 	} else {
812 		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
813 			    & ARMV8_PMU_USERENR_MASK;
814 	}
815 
816 	return true;
817 }
818 
819 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
820 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
821 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
822 	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
823 	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
824 	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
825 	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
826 	  trap_wvr, reset_wvr, n, 0,  get_wvr, set_wvr },		\
827 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
828 	  trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
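
/*
 * For illustration: DBG_BCR_BVR_WCR_WVR_EL1(1) expands to four table
 * entries covering DBGBVR1_EL1, DBGBCR1_EL1, DBGWVR1_EL1 and DBGWCR1_EL1,
 * all carrying index 1 in ->reg so the trap/reset/get/set callbacks know
 * which breakpoint or watchpoint slot to operate on.
 */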
829 
830 /* Macro to expand the PMEVCNTRn_EL0 register */
831 #define PMU_PMEVCNTR_EL0(n)						\
832 	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),					\
833 	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
834 
835 /* Macro to expand the PMEVTYPERn_EL0 register */
836 #define PMU_PMEVTYPER_EL0(n)						\
837 	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\
838 	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
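
/*
 * For illustration: PMU_PMEVCNTR_EL0(3) and PMU_PMEVTYPER_EL0(3) produce
 * entries whose ->reg fields are PMEVCNTR0_EL0 + 3 and PMEVTYPER0_EL0 + 3,
 * i.e. the shadow state of event counter 3 and its event type register.
 */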
839 
840 static bool access_cntp_tval(struct kvm_vcpu *vcpu,
841 		struct sys_reg_params *p,
842 		const struct sys_reg_desc *r)
843 {
844 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
845 	u64 now = kvm_phys_timer_read();
846 
847 	if (p->is_write)
848 		ptimer->cnt_cval = p->regval + now;
849 	else
850 		p->regval = ptimer->cnt_cval - now;
851 
852 	return true;
853 }
854 
855 static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
856 		struct sys_reg_params *p,
857 		const struct sys_reg_desc *r)
858 {
859 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
860 
861 	if (p->is_write) {
862 		/* ISTATUS bit is read-only */
863 		ptimer->cnt_ctl = p->regval & ~ARCH_TIMER_CTRL_IT_STAT;
864 	} else {
865 		u64 now = kvm_phys_timer_read();
866 
867 		p->regval = ptimer->cnt_ctl;
868 		/*
869 		 * Set the ISTATUS bit if the timer has expired.
870 		 * Note that according to ARMv8 ARM Issue A.k, the ISTATUS bit is
871 		 * UNKNOWN when the ENABLE bit is 0, so we choose to set ISTATUS
872 		 * regardless of ENABLE for implementation convenience.
873 		 */
874 		if (ptimer->cnt_cval <= now)
875 			p->regval |= ARCH_TIMER_CTRL_IT_STAT;
876 	}
877 
878 	return true;
879 }
880 
881 static bool access_cntp_cval(struct kvm_vcpu *vcpu,
882 		struct sys_reg_params *p,
883 		const struct sys_reg_desc *r)
884 {
885 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
886 
887 	if (p->is_write)
888 		ptimer->cnt_cval = p->regval;
889 	else
890 		p->regval = ptimer->cnt_cval;
891 
892 	return true;
893 }
894 
895 /*
896  * Architected system registers.
897  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
898  *
899  * Debug handling: We do trap most, if not all, debug-related system
900  * registers. The implementation is good enough to ensure that a guest
901  * can use these with minimal performance degradation. The drawback is
902  * that we don't implement any of the external debug architecture nor
903  * the OSLock protocol. This should be revisited if we ever encounter a
904  * more demanding guest...
905  */
906 static const struct sys_reg_desc sys_reg_descs[] = {
907 	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
908 	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
909 	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
910 
911 	DBG_BCR_BVR_WCR_WVR_EL1(0),
912 	DBG_BCR_BVR_WCR_WVR_EL1(1),
913 	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
914 	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
915 	DBG_BCR_BVR_WCR_WVR_EL1(2),
916 	DBG_BCR_BVR_WCR_WVR_EL1(3),
917 	DBG_BCR_BVR_WCR_WVR_EL1(4),
918 	DBG_BCR_BVR_WCR_WVR_EL1(5),
919 	DBG_BCR_BVR_WCR_WVR_EL1(6),
920 	DBG_BCR_BVR_WCR_WVR_EL1(7),
921 	DBG_BCR_BVR_WCR_WVR_EL1(8),
922 	DBG_BCR_BVR_WCR_WVR_EL1(9),
923 	DBG_BCR_BVR_WCR_WVR_EL1(10),
924 	DBG_BCR_BVR_WCR_WVR_EL1(11),
925 	DBG_BCR_BVR_WCR_WVR_EL1(12),
926 	DBG_BCR_BVR_WCR_WVR_EL1(13),
927 	DBG_BCR_BVR_WCR_WVR_EL1(14),
928 	DBG_BCR_BVR_WCR_WVR_EL1(15),
929 
930 	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
931 	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
932 	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
933 	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
934 	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
935 	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
936 	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
937 	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
938 
939 	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
940 	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
941 	// DBGDTR[TR]X_EL0 share the same encoding
942 	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
943 
944 	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
945 
946 	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
947 	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
948 	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
949 	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
950 	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
951 	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
952 
953 	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
954 	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
955 	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
956 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
957 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
958 
959 	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
960 	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
961 
962 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
963 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
964 
965 	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
966 
967 	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
968 	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
969 	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
970 	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
971 	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
972 	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
973 	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
974 	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
975 	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
976 	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
977 
978 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
979 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
980 
981 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
982 
983 	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
984 
985 	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
986 	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
987 	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
988 	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
989 	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
990 	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
991 	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
992 	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
993 	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
994 	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
995 	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
996 	/*
997 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
998 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
999 	 */
1000 	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
1001 	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
1002 
1003 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
1004 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
1005 
1006 	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
1007 	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
1008 	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
1009 
1010 	/* PMEVCNTRn_EL0 */
1011 	PMU_PMEVCNTR_EL0(0),
1012 	PMU_PMEVCNTR_EL0(1),
1013 	PMU_PMEVCNTR_EL0(2),
1014 	PMU_PMEVCNTR_EL0(3),
1015 	PMU_PMEVCNTR_EL0(4),
1016 	PMU_PMEVCNTR_EL0(5),
1017 	PMU_PMEVCNTR_EL0(6),
1018 	PMU_PMEVCNTR_EL0(7),
1019 	PMU_PMEVCNTR_EL0(8),
1020 	PMU_PMEVCNTR_EL0(9),
1021 	PMU_PMEVCNTR_EL0(10),
1022 	PMU_PMEVCNTR_EL0(11),
1023 	PMU_PMEVCNTR_EL0(12),
1024 	PMU_PMEVCNTR_EL0(13),
1025 	PMU_PMEVCNTR_EL0(14),
1026 	PMU_PMEVCNTR_EL0(15),
1027 	PMU_PMEVCNTR_EL0(16),
1028 	PMU_PMEVCNTR_EL0(17),
1029 	PMU_PMEVCNTR_EL0(18),
1030 	PMU_PMEVCNTR_EL0(19),
1031 	PMU_PMEVCNTR_EL0(20),
1032 	PMU_PMEVCNTR_EL0(21),
1033 	PMU_PMEVCNTR_EL0(22),
1034 	PMU_PMEVCNTR_EL0(23),
1035 	PMU_PMEVCNTR_EL0(24),
1036 	PMU_PMEVCNTR_EL0(25),
1037 	PMU_PMEVCNTR_EL0(26),
1038 	PMU_PMEVCNTR_EL0(27),
1039 	PMU_PMEVCNTR_EL0(28),
1040 	PMU_PMEVCNTR_EL0(29),
1041 	PMU_PMEVCNTR_EL0(30),
1042 	/* PMEVTYPERn_EL0 */
1043 	PMU_PMEVTYPER_EL0(0),
1044 	PMU_PMEVTYPER_EL0(1),
1045 	PMU_PMEVTYPER_EL0(2),
1046 	PMU_PMEVTYPER_EL0(3),
1047 	PMU_PMEVTYPER_EL0(4),
1048 	PMU_PMEVTYPER_EL0(5),
1049 	PMU_PMEVTYPER_EL0(6),
1050 	PMU_PMEVTYPER_EL0(7),
1051 	PMU_PMEVTYPER_EL0(8),
1052 	PMU_PMEVTYPER_EL0(9),
1053 	PMU_PMEVTYPER_EL0(10),
1054 	PMU_PMEVTYPER_EL0(11),
1055 	PMU_PMEVTYPER_EL0(12),
1056 	PMU_PMEVTYPER_EL0(13),
1057 	PMU_PMEVTYPER_EL0(14),
1058 	PMU_PMEVTYPER_EL0(15),
1059 	PMU_PMEVTYPER_EL0(16),
1060 	PMU_PMEVTYPER_EL0(17),
1061 	PMU_PMEVTYPER_EL0(18),
1062 	PMU_PMEVTYPER_EL0(19),
1063 	PMU_PMEVTYPER_EL0(20),
1064 	PMU_PMEVTYPER_EL0(21),
1065 	PMU_PMEVTYPER_EL0(22),
1066 	PMU_PMEVTYPER_EL0(23),
1067 	PMU_PMEVTYPER_EL0(24),
1068 	PMU_PMEVTYPER_EL0(25),
1069 	PMU_PMEVTYPER_EL0(26),
1070 	PMU_PMEVTYPER_EL0(27),
1071 	PMU_PMEVTYPER_EL0(28),
1072 	PMU_PMEVTYPER_EL0(29),
1073 	PMU_PMEVTYPER_EL0(30),
1074 	/*
1075 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
1076 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
1077 	 */
1078 	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
1079 
1080 	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1081 	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1082 	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
1083 };
1084 
1085 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1086 			struct sys_reg_params *p,
1087 			const struct sys_reg_desc *r)
1088 {
1089 	if (p->is_write) {
1090 		return ignore_write(vcpu, p);
1091 	} else {
1092 		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1093 		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1094 		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
1095 
1096 		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
1097 			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
1098 			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
1099 			     | (6 << 16) | (el3 << 14) | (el3 << 12));
1100 		return true;
1101 	}
1102 }
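
/*
 * Reading of the constructed value (assumed, based on the AArch32 DBGIDR
 * layout): WRPs in [31:28], BRPs in [27:24], context-aware breakpoints in
 * [23:20], what appears to be a hard-coded debug architecture version of
 * 6 in [19:16], and the EL3-dependent bits at positions 14 and 12.
 */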
1103 
1104 static bool trap_debug32(struct kvm_vcpu *vcpu,
1105 			 struct sys_reg_params *p,
1106 			 const struct sys_reg_desc *r)
1107 {
1108 	if (p->is_write) {
1109 		vcpu_cp14(vcpu, r->reg) = p->regval;
1110 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
1111 	} else {
1112 		p->regval = vcpu_cp14(vcpu, r->reg);
1113 	}
1114 
1115 	return true;
1116 }
1117 
1118 /* AArch32 debug register mappings
1119  *
1120  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
1121  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
1122  *
1123  * All control registers and watchpoint value registers are mapped to
1124  * the lower 32 bits of their AArch64 equivalents. We share the trap
1125  * handlers with the above AArch64 code which checks what mode the
1126  * system is in.
1127  */
1128 
1129 static bool trap_xvr(struct kvm_vcpu *vcpu,
1130 		     struct sys_reg_params *p,
1131 		     const struct sys_reg_desc *rd)
1132 {
1133 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
1134 
1135 	if (p->is_write) {
1136 		u64 val = *dbg_reg;
1137 
1138 		val &= 0xffffffffUL;
1139 		val |= p->regval << 32;
1140 		*dbg_reg = val;
1141 
1142 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
1143 	} else {
1144 		p->regval = *dbg_reg >> 32;
1145 	}
1146 
1147 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
1148 
1149 	return true;
1150 }
1151 
1152 #define DBG_BCR_BVR_WCR_WVR(n)						\
1153 	/* DBGBVRn */							\
1154 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, 	\
1155 	/* DBGBCRn */							\
1156 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
1157 	/* DBGWVRn */							\
1158 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
1159 	/* DBGWCRn */							\
1160 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1161 
1162 #define DBGBXVR(n)							\
1163 	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
1164 
1165 /*
1166  * Trapped cp14 registers. We generally ignore most of the external
1167  * debug registers, on the principle that they don't really make sense
1168  * to a guest. Revisit this one day, should this principle change.
1169  */
1170 static const struct sys_reg_desc cp14_regs[] = {
1171 	/* DBGIDR */
1172 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
1173 	/* DBGDTRRXext */
1174 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
1175 
1176 	DBG_BCR_BVR_WCR_WVR(0),
1177 	/* DBGDSCRint */
1178 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
1179 	DBG_BCR_BVR_WCR_WVR(1),
1180 	/* DBGDCCINT */
1181 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
1182 	/* DBGDSCRext */
1183 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
1184 	DBG_BCR_BVR_WCR_WVR(2),
1185 	/* DBGDTR[RT]Xint */
1186 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
1187 	/* DBGDTR[RT]Xext */
1188 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
1189 	DBG_BCR_BVR_WCR_WVR(3),
1190 	DBG_BCR_BVR_WCR_WVR(4),
1191 	DBG_BCR_BVR_WCR_WVR(5),
1192 	/* DBGWFAR */
1193 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
1194 	/* DBGOSECCR */
1195 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
1196 	DBG_BCR_BVR_WCR_WVR(6),
1197 	/* DBGVCR */
1198 	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
1199 	DBG_BCR_BVR_WCR_WVR(7),
1200 	DBG_BCR_BVR_WCR_WVR(8),
1201 	DBG_BCR_BVR_WCR_WVR(9),
1202 	DBG_BCR_BVR_WCR_WVR(10),
1203 	DBG_BCR_BVR_WCR_WVR(11),
1204 	DBG_BCR_BVR_WCR_WVR(12),
1205 	DBG_BCR_BVR_WCR_WVR(13),
1206 	DBG_BCR_BVR_WCR_WVR(14),
1207 	DBG_BCR_BVR_WCR_WVR(15),
1208 
1209 	/* DBGDRAR (32bit) */
1210 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
1211 
1212 	DBGBXVR(0),
1213 	/* DBGOSLAR */
1214 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
1215 	DBGBXVR(1),
1216 	/* DBGOSLSR */
1217 	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
1218 	DBGBXVR(2),
1219 	DBGBXVR(3),
1220 	/* DBGOSDLR */
1221 	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
1222 	DBGBXVR(4),
1223 	/* DBGPRCR */
1224 	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
1225 	DBGBXVR(5),
1226 	DBGBXVR(6),
1227 	DBGBXVR(7),
1228 	DBGBXVR(8),
1229 	DBGBXVR(9),
1230 	DBGBXVR(10),
1231 	DBGBXVR(11),
1232 	DBGBXVR(12),
1233 	DBGBXVR(13),
1234 	DBGBXVR(14),
1235 	DBGBXVR(15),
1236 
1237 	/* DBGDSAR (32bit) */
1238 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
1239 
1240 	/* DBGDEVID2 */
1241 	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
1242 	/* DBGDEVID1 */
1243 	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
1244 	/* DBGDEVID */
1245 	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
1246 	/* DBGCLAIMSET */
1247 	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
1248 	/* DBGCLAIMCLR */
1249 	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
1250 	/* DBGAUTHSTATUS */
1251 	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
1252 };
1253 
1254 /* Trapped cp14 64bit registers */
1255 static const struct sys_reg_desc cp14_64_regs[] = {
1256 	/* DBGDRAR (64bit) */
1257 	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
1258 
1259 	/* DBGDSAR (64bit) */
1260 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
1261 };
1262 
1263 /* Macro to expand the PMEVCNTRn register */
1264 #define PMU_PMEVCNTR(n)							\
1265 	/* PMEVCNTRn */							\
1266 	{ Op1(0), CRn(0b1110),						\
1267 	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
1268 	  access_pmu_evcntr }
1269 
1270 /* Macro to expand the PMEVTYPERn register */
1271 #define PMU_PMEVTYPER(n)						\
1272 	/* PMEVTYPERn */						\
1273 	{ Op1(0), CRn(0b1110),						\
1274 	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
1275 	  access_pmu_evtyper }
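
/*
 * For illustration: PMU_PMEVCNTR(10) generates a cp15 entry with
 * CRn = 14, CRm = 0b1001 and Op2 = 2, the AArch32 encoding of PMEVCNTR10,
 * which access_pmu_evcntr() decodes back to counter index 10.
 */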
1276 
1277 /*
1278  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
1279  * depending on the way they are accessed (as a 32bit or a 64bit
1280  * register).
1281  */
1282 static const struct sys_reg_desc cp15_regs[] = {
1283 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
1284 
1285 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
1286 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1287 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
1288 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
1289 	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
1290 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
1291 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
1292 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
1293 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
1294 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
1295 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
1296 
1297 	/*
1298 	 * DC{C,I,CI}SW operations:
1299 	 */
1300 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
1301 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
1302 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
1303 
1304 	/* PMU */
1305 	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
1306 	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
1307 	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
1308 	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
1309 	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
1310 	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
1311 	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
1312 	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
1313 	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
1314 	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
1315 	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
1316 	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
1317 	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
1318 	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
1319 	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
1320 
1321 	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
1322 	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
1323 	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
1324 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
1325 
1326 	/* ICC_SRE */
1327 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
1328 
1329 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
1330 
1331 	/* PMEVCNTRn */
1332 	PMU_PMEVCNTR(0),
1333 	PMU_PMEVCNTR(1),
1334 	PMU_PMEVCNTR(2),
1335 	PMU_PMEVCNTR(3),
1336 	PMU_PMEVCNTR(4),
1337 	PMU_PMEVCNTR(5),
1338 	PMU_PMEVCNTR(6),
1339 	PMU_PMEVCNTR(7),
1340 	PMU_PMEVCNTR(8),
1341 	PMU_PMEVCNTR(9),
1342 	PMU_PMEVCNTR(10),
1343 	PMU_PMEVCNTR(11),
1344 	PMU_PMEVCNTR(12),
1345 	PMU_PMEVCNTR(13),
1346 	PMU_PMEVCNTR(14),
1347 	PMU_PMEVCNTR(15),
1348 	PMU_PMEVCNTR(16),
1349 	PMU_PMEVCNTR(17),
1350 	PMU_PMEVCNTR(18),
1351 	PMU_PMEVCNTR(19),
1352 	PMU_PMEVCNTR(20),
1353 	PMU_PMEVCNTR(21),
1354 	PMU_PMEVCNTR(22),
1355 	PMU_PMEVCNTR(23),
1356 	PMU_PMEVCNTR(24),
1357 	PMU_PMEVCNTR(25),
1358 	PMU_PMEVCNTR(26),
1359 	PMU_PMEVCNTR(27),
1360 	PMU_PMEVCNTR(28),
1361 	PMU_PMEVCNTR(29),
1362 	PMU_PMEVCNTR(30),
1363 	/* PMEVTYPERn */
1364 	PMU_PMEVTYPER(0),
1365 	PMU_PMEVTYPER(1),
1366 	PMU_PMEVTYPER(2),
1367 	PMU_PMEVTYPER(3),
1368 	PMU_PMEVTYPER(4),
1369 	PMU_PMEVTYPER(5),
1370 	PMU_PMEVTYPER(6),
1371 	PMU_PMEVTYPER(7),
1372 	PMU_PMEVTYPER(8),
1373 	PMU_PMEVTYPER(9),
1374 	PMU_PMEVTYPER(10),
1375 	PMU_PMEVTYPER(11),
1376 	PMU_PMEVTYPER(12),
1377 	PMU_PMEVTYPER(13),
1378 	PMU_PMEVTYPER(14),
1379 	PMU_PMEVTYPER(15),
1380 	PMU_PMEVTYPER(16),
1381 	PMU_PMEVTYPER(17),
1382 	PMU_PMEVTYPER(18),
1383 	PMU_PMEVTYPER(19),
1384 	PMU_PMEVTYPER(20),
1385 	PMU_PMEVTYPER(21),
1386 	PMU_PMEVTYPER(22),
1387 	PMU_PMEVTYPER(23),
1388 	PMU_PMEVTYPER(24),
1389 	PMU_PMEVTYPER(25),
1390 	PMU_PMEVTYPER(26),
1391 	PMU_PMEVTYPER(27),
1392 	PMU_PMEVTYPER(28),
1393 	PMU_PMEVTYPER(29),
1394 	PMU_PMEVTYPER(30),
1395 	/* PMCCFILTR */
1396 	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
1397 };
1398 
1399 static const struct sys_reg_desc cp15_64_regs[] = {
1400 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1401 	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
1402 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
1403 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
1404 };
1405 
1406 /* Target specific emulation tables */
1407 static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
1408 
1409 void kvm_register_target_sys_reg_table(unsigned int target,
1410 				       struct kvm_sys_reg_target_table *table)
1411 {
1412 	target_tables[target] = table;
1413 }
1414 
1415 /* Get specific register table for this target. */
1416 static const struct sys_reg_desc *get_target_table(unsigned target,
1417 						   bool mode_is_64,
1418 						   size_t *num)
1419 {
1420 	struct kvm_sys_reg_target_table *table;
1421 
1422 	table = target_tables[target];
1423 	if (mode_is_64) {
1424 		*num = table->table64.num;
1425 		return table->table64.table;
1426 	} else {
1427 		*num = table->table32.num;
1428 		return table->table32.table;
1429 	}
1430 }
1431 
1432 #define reg_to_match_value(x)						\
1433 	({								\
1434 		unsigned long val;					\
1435 		val  = (x)->Op0 << 14;					\
1436 		val |= (x)->Op1 << 11;					\
1437 		val |= (x)->CRn << 7;					\
1438 		val |= (x)->CRm << 3;					\
1439 		val |= (x)->Op2;					\
1440 		val;							\
1441 	 })
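
/*
 * Worked example: SCTLR_EL1 is encoded as Op0=3, Op1=0, CRn=1, CRm=0,
 * Op2=0, which packs to (3 << 14) | (1 << 7) = 0xc080. Because each field
 * is given enough bits to hold its maximum value, comparing packed keys
 * gives the same ordering as the Op0, Op1, CRn, CRm, Op2 sort required of
 * sys_reg_descs[], which is what lets bsearch() below work.
 */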
1442 
1443 static int match_sys_reg(const void *key, const void *elt)
1444 {
1445 	const unsigned long pval = (unsigned long)key;
1446 	const struct sys_reg_desc *r = elt;
1447 
1448 	return pval - reg_to_match_value(r);
1449 }
1450 
1451 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
1452 					 const struct sys_reg_desc table[],
1453 					 unsigned int num)
1454 {
1455 	unsigned long pval = reg_to_match_value(params);
1456 
1457 	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
1458 }
1459 
1460 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
1461 {
1462 	kvm_inject_undefined(vcpu);
1463 	return 1;
1464 }
1465 
1466 static void perform_access(struct kvm_vcpu *vcpu,
1467 			   struct sys_reg_params *params,
1468 			   const struct sys_reg_desc *r)
1469 {
1470 	/*
1471 	 * Not having an accessor means that we have configured a trap
1472 	 * that we don't know how to handle. This certainly qualifies
1473 	 * as a gross bug that should be fixed right away.
1474 	 */
1475 	BUG_ON(!r->access);
1476 
1477 	/* Skip instruction if instructed so */
1478 	if (likely(r->access(vcpu, params, r)))
1479 		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1480 }
1481 
1482 /*
1483  * emulate_cp --  tries to match a sys_reg access in a handling table, and
1484  *                call the corresponding trap handler.
1485  *
1486  * @params: pointer to the descriptor of the access
1487  * @table: array of trap descriptors
1488  * @num: size of the trap descriptor array
1489  *
1490  * Return 0 if the access has been handled, and -1 if not.
1491  */
1492 static int emulate_cp(struct kvm_vcpu *vcpu,
1493 		      struct sys_reg_params *params,
1494 		      const struct sys_reg_desc *table,
1495 		      size_t num)
1496 {
1497 	const struct sys_reg_desc *r;
1498 
1499 	if (!table)
1500 		return -1;	/* Not handled */
1501 
1502 	r = find_reg(params, table, num);
1503 
1504 	if (r) {
1505 		perform_access(vcpu, params, r);
1506 		return 0;
1507 	}
1508 
1509 	/* Not handled */
1510 	return -1;
1511 }
1512 
1513 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
1514 				struct sys_reg_params *params)
1515 {
1516 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
1517 	int cp = -1;
1518 
1519 	switch(hsr_ec) {
1520 	case ESR_ELx_EC_CP15_32:
1521 	case ESR_ELx_EC_CP15_64:
1522 		cp = 15;
1523 		break;
1524 	case ESR_ELx_EC_CP14_MR:
1525 	case ESR_ELx_EC_CP14_64:
1526 		cp = 14;
1527 		break;
1528 	default:
1529 		WARN_ON(1);
1530 	}
1531 
1532 	kvm_err("Unsupported guest CP%d access at: %08lx\n",
1533 		cp, *vcpu_pc(vcpu));
1534 	print_sys_reg_instr(params);
1535 	kvm_inject_undefined(vcpu);
1536 }
1537 
1538 /**
1539  * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
1540  * @vcpu: The VCPU pointer
1541  * @run:  The kvm_run struct
1542  */
1543 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1544 			    const struct sys_reg_desc *global,
1545 			    size_t nr_global,
1546 			    const struct sys_reg_desc *target_specific,
1547 			    size_t nr_specific)
1548 {
1549 	struct sys_reg_params params;
1550 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
1551 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
1552 	int Rt2 = (hsr >> 10) & 0x1f;
1553 
1554 	params.is_aarch32 = true;
1555 	params.is_32bit = false;
1556 	params.CRm = (hsr >> 1) & 0xf;
1557 	params.is_write = ((hsr & 1) == 0);
1558 
1559 	params.Op0 = 0;
1560 	params.Op1 = (hsr >> 16) & 0xf;
1561 	params.Op2 = 0;
1562 	params.CRn = 0;
1563 
1564 	/*
1565 	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
1566 	 * backends between AArch32 and AArch64, we get away with it.
1567 	 */
1568 	if (params.is_write) {
1569 		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
1570 		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
1571 	}
1572 
1573 	/*
1574 	 * Try to emulate the coprocessor access using the target
1575 	 * specific table first, and using the global table afterwards.
1576 	 * If either of the tables contains a handler, handle the
1577 	 * potential register operation in the case of a read and return
1578 	 * with success.
1579 	 */
1580 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1581 	    !emulate_cp(vcpu, &params, global, nr_global)) {
1582 		/* Split up the value between registers for the read side */
1583 		if (!params.is_write) {
1584 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
1585 			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
1586 		}
1587 
1588 		return 1;
1589 	}
1590 
1591 	unhandled_cp_access(vcpu, &params);
1592 	return 1;
1593 }
1594 
1595 /**
1596  * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
1597  * @vcpu: The VCPU pointer
1598  * @run:  The kvm_run struct
1599  */
1600 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
1601 			    const struct sys_reg_desc *global,
1602 			    size_t nr_global,
1603 			    const struct sys_reg_desc *target_specific,
1604 			    size_t nr_specific)
1605 {
1606 	struct sys_reg_params params;
1607 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
1608 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
1609 
1610 	params.is_aarch32 = true;
1611 	params.is_32bit = true;
1612 	params.CRm = (hsr >> 1) & 0xf;
1613 	params.regval = vcpu_get_reg(vcpu, Rt);
1614 	params.is_write = ((hsr & 1) == 0);
1615 	params.CRn = (hsr >> 10) & 0xf;
1616 	params.Op0 = 0;
1617 	params.Op1 = (hsr >> 14) & 0x7;
1618 	params.Op2 = (hsr >> 17) & 0x7;
1619 
1620 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1621 	    !emulate_cp(vcpu, &params, global, nr_global)) {
1622 		if (!params.is_write)
1623 			vcpu_set_reg(vcpu, Rt, params.regval);
1624 		return 1;
1625 	}
1626 
1627 	unhandled_cp_access(vcpu, &params);
1628 	return 1;
1629 }
1630 
1631 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
1632 {
1633 	const struct sys_reg_desc *target_specific;
1634 	size_t num;
1635 
1636 	target_specific = get_target_table(vcpu->arch.target, false, &num);
1637 	return kvm_handle_cp_64(vcpu,
1638 				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
1639 				target_specific, num);
1640 }
1641 
1642 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1643 {
1644 	const struct sys_reg_desc *target_specific;
1645 	size_t num;
1646 
1647 	target_specific = get_target_table(vcpu->arch.target, false, &num);
1648 	return kvm_handle_cp_32(vcpu,
1649 				cp15_regs, ARRAY_SIZE(cp15_regs),
1650 				target_specific, num);
1651 }
1652 
1653 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
1654 {
1655 	return kvm_handle_cp_64(vcpu,
1656 				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
1657 				NULL, 0);
1658 }
1659 
1660 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1661 {
1662 	return kvm_handle_cp_32(vcpu,
1663 				cp14_regs, ARRAY_SIZE(cp14_regs),
1664 				NULL, 0);
1665 }
1666 
1667 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
1668 			   struct sys_reg_params *params)
1669 {
1670 	size_t num;
1671 	const struct sys_reg_desc *table, *r;
1672 
1673 	table = get_target_table(vcpu->arch.target, true, &num);
1674 
1675 	/* Search target-specific then generic table. */
1676 	r = find_reg(params, table, num);
1677 	if (!r)
1678 		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
1679 
1680 	if (likely(r)) {
1681 		perform_access(vcpu, params, r);
1682 	} else {
1683 		kvm_err("Unsupported guest sys_reg access at: %lx\n",
1684 			*vcpu_pc(vcpu));
1685 		print_sys_reg_instr(params);
1686 		kvm_inject_undefined(vcpu);
1687 	}
1688 	return 1;
1689 }
1690 
1691 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
1692 			      const struct sys_reg_desc *table, size_t num)
1693 {
1694 	unsigned long i;
1695 
1696 	for (i = 0; i < num; i++)
1697 		if (table[i].reset)
1698 			table[i].reset(vcpu, &table[i]);
1699 }
1700 
1701 /**
1702  * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
1703  * @vcpu: The VCPU pointer
1704  * @run:  The kvm_run struct
1705  */
1706 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
1707 {
1708 	struct sys_reg_params params;
1709 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
1710 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
1711 	int ret;
1712 
1713 	trace_kvm_handle_sys_reg(esr);
1714 
1715 	params.is_aarch32 = false;
1716 	params.is_32bit = false;
1717 	params.Op0 = (esr >> 20) & 3;
1718 	params.Op1 = (esr >> 14) & 0x7;
1719 	params.CRn = (esr >> 10) & 0xf;
1720 	params.CRm = (esr >> 1) & 0xf;
1721 	params.Op2 = (esr >> 17) & 0x7;
1722 	params.regval = vcpu_get_reg(vcpu, Rt);
1723 	params.is_write = !(esr & 1);
1724 
1725 	ret = emulate_sys_reg(vcpu, &params);
1726 
1727 	if (!params.is_write)
1728 		vcpu_set_reg(vcpu, Rt, params.regval);
1729 	return ret;
1730 }
1731 
1732 /******************************************************************************
1733  * Userspace API
1734  *****************************************************************************/
1735 
1736 static bool index_to_params(u64 id, struct sys_reg_params *params)
1737 {
1738 	switch (id & KVM_REG_SIZE_MASK) {
1739 	case KVM_REG_SIZE_U64:
1740 		/* Any unused index bits mean it's not valid. */
1741 		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
1742 			      | KVM_REG_ARM_COPROC_MASK
1743 			      | KVM_REG_ARM64_SYSREG_OP0_MASK
1744 			      | KVM_REG_ARM64_SYSREG_OP1_MASK
1745 			      | KVM_REG_ARM64_SYSREG_CRN_MASK
1746 			      | KVM_REG_ARM64_SYSREG_CRM_MASK
1747 			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
1748 			return false;
1749 		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
1750 			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
1751 		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
1752 			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
1753 		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
1754 			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
1755 		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
1756 			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
1757 		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
1758 			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
1759 		return true;
1760 	default:
1761 		return false;
1762 	}
1763 }
1764 
1765 const struct sys_reg_desc *find_reg_by_id(u64 id,
1766 					  struct sys_reg_params *params,
1767 					  const struct sys_reg_desc table[],
1768 					  unsigned int num)
1769 {
1770 	if (!index_to_params(id, params))
1771 		return NULL;
1772 
1773 	return find_reg(params, table, num);
1774 }
1775 
1776 /* Decode an index value, and find the sys_reg_desc entry. */
1777 static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
1778 						    u64 id)
1779 {
1780 	size_t num;
1781 	const struct sys_reg_desc *table, *r;
1782 	struct sys_reg_params params;
1783 
1784 	/* We only do sys_reg for now. */
1785 	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
1786 		return NULL;
1787 
1788 	table = get_target_table(vcpu->arch.target, true, &num);
1789 	r = find_reg_by_id(id, &params, table, num);
1790 	if (!r)
1791 		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
1792 
1793 	/* Not saved in the sys_reg array? */
1794 	if (r && !r->reg)
1795 		r = NULL;
1796 
1797 	return r;
1798 }
1799 
1800 /*
1801  * These are the invariant sys_regs: we let the guest see the
1802  * host versions of these, so they're part of the guest state.
1803  *
1804  * A future CPU may provide a mechanism to present different values to
1805  * the guest, or a future kvm may trap them.
1806  */
1807 
1808 #define FUNCTION_INVARIANT(reg)						\
1809 	static void get_##reg(struct kvm_vcpu *v,			\
1810 			      const struct sys_reg_desc *r)		\
1811 	{								\
1812 		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
1813 	}
1814 
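/*
 * For example, FUNCTION_INVARIANT(midr_el1) expands to a get_midr_el1()
 * helper that snapshots the host's MIDR_EL1 into the descriptor's ->val.
 */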
1815 FUNCTION_INVARIANT(midr_el1)
1816 FUNCTION_INVARIANT(ctr_el0)
1817 FUNCTION_INVARIANT(revidr_el1)
1818 FUNCTION_INVARIANT(id_pfr0_el1)
1819 FUNCTION_INVARIANT(id_pfr1_el1)
1820 FUNCTION_INVARIANT(id_dfr0_el1)
1821 FUNCTION_INVARIANT(id_afr0_el1)
1822 FUNCTION_INVARIANT(id_mmfr0_el1)
1823 FUNCTION_INVARIANT(id_mmfr1_el1)
1824 FUNCTION_INVARIANT(id_mmfr2_el1)
1825 FUNCTION_INVARIANT(id_mmfr3_el1)
1826 FUNCTION_INVARIANT(id_isar0_el1)
1827 FUNCTION_INVARIANT(id_isar1_el1)
1828 FUNCTION_INVARIANT(id_isar2_el1)
1829 FUNCTION_INVARIANT(id_isar3_el1)
1830 FUNCTION_INVARIANT(id_isar4_el1)
1831 FUNCTION_INVARIANT(id_isar5_el1)
1832 FUNCTION_INVARIANT(clidr_el1)
1833 FUNCTION_INVARIANT(aidr_el1)
1834 
1835 /* ->val is filled in by kvm_sys_reg_table_init() */
1836 static struct sys_reg_desc invariant_sys_regs[] = {
1837 	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
1838 	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
1839 	{ SYS_DESC(SYS_ID_PFR0_EL1), NULL, get_id_pfr0_el1 },
1840 	{ SYS_DESC(SYS_ID_PFR1_EL1), NULL, get_id_pfr1_el1 },
1841 	{ SYS_DESC(SYS_ID_DFR0_EL1), NULL, get_id_dfr0_el1 },
1842 	{ SYS_DESC(SYS_ID_AFR0_EL1), NULL, get_id_afr0_el1 },
1843 	{ SYS_DESC(SYS_ID_MMFR0_EL1), NULL, get_id_mmfr0_el1 },
1844 	{ SYS_DESC(SYS_ID_MMFR1_EL1), NULL, get_id_mmfr1_el1 },
1845 	{ SYS_DESC(SYS_ID_MMFR2_EL1), NULL, get_id_mmfr2_el1 },
1846 	{ SYS_DESC(SYS_ID_MMFR3_EL1), NULL, get_id_mmfr3_el1 },
1847 	{ SYS_DESC(SYS_ID_ISAR0_EL1), NULL, get_id_isar0_el1 },
1848 	{ SYS_DESC(SYS_ID_ISAR1_EL1), NULL, get_id_isar1_el1 },
1849 	{ SYS_DESC(SYS_ID_ISAR2_EL1), NULL, get_id_isar2_el1 },
1850 	{ SYS_DESC(SYS_ID_ISAR3_EL1), NULL, get_id_isar3_el1 },
1851 	{ SYS_DESC(SYS_ID_ISAR4_EL1), NULL, get_id_isar4_el1 },
1852 	{ SYS_DESC(SYS_ID_ISAR5_EL1), NULL, get_id_isar5_el1 },
1853 	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
1854 	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
1855 	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
1856 };
1857 
1858 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
1859 {
1860 	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
1861 		return -EFAULT;
1862 	return 0;
1863 }
1864 
1865 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
1866 {
1867 	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
1868 		return -EFAULT;
1869 	return 0;
1870 }
1871 
1872 static int get_invariant_sys_reg(u64 id, void __user *uaddr)
1873 {
1874 	struct sys_reg_params params;
1875 	const struct sys_reg_desc *r;
1876 
1877 	r = find_reg_by_id(id, &params, invariant_sys_regs,
1878 			   ARRAY_SIZE(invariant_sys_regs));
1879 	if (!r)
1880 		return -ENOENT;
1881 
1882 	return reg_to_user(uaddr, &r->val, id);
1883 }
1884 
1885 static int set_invariant_sys_reg(u64 id, void __user *uaddr)
1886 {
1887 	struct sys_reg_params params;
1888 	const struct sys_reg_desc *r;
1889 	int err;
1890 	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
1891 
1892 	r = find_reg_by_id(id, &params, invariant_sys_regs,
1893 			   ARRAY_SIZE(invariant_sys_regs));
1894 	if (!r)
1895 		return -ENOENT;
1896 
1897 	err = reg_from_user(&val, uaddr, id);
1898 	if (err)
1899 		return err;
1900 
1901 	/* This is what we mean by invariant: you can't change it. */
1902 	if (r->val != val)
1903 		return -EINVAL;
1904 
1905 	return 0;
1906 }
1907 
1908 static bool is_valid_cache(u32 val)
1909 {
1910 	u32 level, ctype;
1911 
1912 	if (val >= CSSELR_MAX)
1913 		return false;
1914 
1915 	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
1916 	level = (val >> 1);
1917 	ctype = (cache_levels >> (level * 3)) & 7;
1918 
1919 	switch (ctype) {
1920 	case 0: /* No cache */
1921 		return false;
1922 	case 1: /* Instruction cache only */
1923 		return (val & 1);
1924 	case 2: /* Data cache only */
1925 	case 4: /* Unified cache */
1926 		return !(val & 1);
1927 	case 3: /* Separate instruction and data caches */
1928 		return true;
1929 	default: /* Reserved: we can't know instruction or data. */
1930 		return false;
1931 	}
1932 }
1933 
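/*
 * The demux registers expose one CCSIDR value per valid CSSELR index; the
 * CSSELR value is carried in the KVM_REG_ARM_DEMUX_VAL field of the index,
 * and the register itself is 32 bits wide.
 */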
1934 static int demux_c15_get(u64 id, void __user *uaddr)
1935 {
1936 	u32 val;
1937 	u32 __user *uval = uaddr;
1938 
1939 	/* Fail if we have unknown bits set. */
1940 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
1941 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
1942 		return -ENOENT;
1943 
1944 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
1945 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
1946 		if (KVM_REG_SIZE(id) != 4)
1947 			return -ENOENT;
1948 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
1949 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
1950 		if (!is_valid_cache(val))
1951 			return -ENOENT;
1952 
1953 		return put_user(get_ccsidr(val), uval);
1954 	default:
1955 		return -ENOENT;
1956 	}
1957 }
1958 
1959 static int demux_c15_set(u64 id, void __user *uaddr)
1960 {
1961 	u32 val, newval;
1962 	u32 __user *uval = uaddr;
1963 
1964 	/* Fail if we have unknown bits set. */
1965 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
1966 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
1967 		return -ENOENT;
1968 
1969 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
1970 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
1971 		if (KVM_REG_SIZE(id) != 4)
1972 			return -ENOENT;
1973 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
1974 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
1975 		if (!is_valid_cache(val))
1976 			return -ENOENT;
1977 
1978 		if (get_user(newval, uval))
1979 			return -EFAULT;
1980 
1981 		/* This is also invariant: you can't change it. */
1982 		if (newval != get_ccsidr(val))
1983 			return -EINVAL;
1984 		return 0;
1985 	default:
1986 		return -ENOENT;
1987 	}
1988 }
1989 
1990 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1991 {
1992 	const struct sys_reg_desc *r;
1993 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
1994 
1995 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1996 		return demux_c15_get(reg->id, uaddr);
1997 
1998 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
1999 		return -ENOENT;
2000 
2001 	r = index_to_sys_reg_desc(vcpu, reg->id);
2002 	if (!r)
2003 		return get_invariant_sys_reg(reg->id, uaddr);
2004 
2005 	if (r->get_user)
2006 		return (r->get_user)(vcpu, r, reg, uaddr);
2007 
2008 	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
2009 }
2010 
2011 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2012 {
2013 	const struct sys_reg_desc *r;
2014 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2015 
2016 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2017 		return demux_c15_set(reg->id, uaddr);
2018 
2019 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2020 		return -ENOENT;
2021 
2022 	r = index_to_sys_reg_desc(vcpu, reg->id);
2023 	if (!r)
2024 		return set_invariant_sys_reg(reg->id, uaddr);
2025 
2026 	if (r->set_user)
2027 		return (r->set_user)(vcpu, r, reg, uaddr);
2028 
2029 	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
2030 }
2031 
2032 static unsigned int num_demux_regs(void)
2033 {
2034 	unsigned int i, count = 0;
2035 
2036 	for (i = 0; i < CSSELR_MAX; i++)
2037 		if (is_valid_cache(i))
2038 			count++;
2039 
2040 	return count;
2041 }
2042 
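/*
 * Emit one demux index (CCSIDR demux ID | CSSELR value) for every cache
 * that is_valid_cache() accepts.
 */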
2043 static int write_demux_regids(u64 __user *uindices)
2044 {
2045 	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2046 	unsigned int i;
2047 
2048 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2049 	for (i = 0; i < CSSELR_MAX; i++) {
2050 		if (!is_valid_cache(i))
2051 			continue;
2052 		if (put_user(val | i, uindices))
2053 			return -EFAULT;
2054 		uindices++;
2055 	}
2056 	return 0;
2057 }
2058 
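/*
 * Build the KVM_{GET,SET}_ONE_REG index for a descriptor; index_to_params()
 * above performs the reverse mapping.
 */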
2059 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2060 {
2061 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2062 		KVM_REG_ARM64_SYSREG |
2063 		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2064 		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2065 		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2066 		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2067 		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2068 }
2069 
2070 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2071 {
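	/* A NULL buffer pointer means the caller only wants a count. */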
2072 	if (!*uind)
2073 		return true;
2074 
2075 	if (put_user(sys_reg_to_index(reg), *uind))
2076 		return false;
2077 
2078 	(*uind)++;
2079 	return true;
2080 }
2081 
2082 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
2083 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
2084 {
2085 	const struct sys_reg_desc *i1, *i2, *end1, *end2;
2086 	unsigned int total = 0;
2087 	size_t num;
2088 
2089 	/* We check for duplicates here, to allow arch-specific overrides. */
2090 	i1 = get_target_table(vcpu->arch.target, true, &num);
2091 	end1 = i1 + num;
2092 	i2 = sys_reg_descs;
2093 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
2094 
2095 	BUG_ON(i1 == end1 || i2 == end2);
2096 
2097 	/* Walk carefully, as both tables may refer to the same register. */
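	/*
	 * Both tables are sorted by encoding (see check_sysreg_table()), so
	 * this is a two-pointer merge: on a match both cursors advance and
	 * only the target-specific entry is reported to userspace.
	 */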
2098 	while (i1 || i2) {
2099 		int cmp = cmp_sys_reg(i1, i2);
2100 		/* A target-specific entry overrides the generic one. */
2101 		if (cmp <= 0) {
2102 			/* Ignore registers we trap but don't save. */
2103 			if (i1->reg) {
2104 				if (!copy_reg_to_user(i1, &uind))
2105 					return -EFAULT;
2106 				total++;
2107 			}
2108 		} else {
2109 			/* Ignore registers we trap but don't save. */
2110 			if (i2->reg) {
2111 				if (!copy_reg_to_user(i2, &uind))
2112 					return -EFAULT;
2113 				total++;
2114 			}
2115 		}
2116 
2117 		if (cmp <= 0 && ++i1 == end1)
2118 			i1 = NULL;
2119 		if (cmp >= 0 && ++i2 == end2)
2120 			i2 = NULL;
2121 	}
2122 	return total;
2123 }
2124 
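/*
 * Passing a NULL uindices pointer makes walk_sys_regs() count matching
 * registers without copying anything to userspace.
 */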
2125 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2126 {
2127 	return ARRAY_SIZE(invariant_sys_regs)
2128 		+ num_demux_regs()
2129 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
2130 }
2131 
2132 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2133 {
2134 	unsigned int i;
2135 	int err;
2136 
2137 	/* Then give them all the invariant registers' indices. */
2138 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2139 		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2140 			return -EFAULT;
2141 		uindices++;
2142 	}
2143 
2144 	err = walk_sys_regs(vcpu, uindices);
2145 	if (err < 0)
2146 		return err;
2147 	uindices += err;
2148 
2149 	return write_demux_regids(uindices);
2150 }
2151 
2152 static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
2153 {
2154 	unsigned int i;
2155 
2156 	for (i = 1; i < n; i++) {
2157 		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2158 			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2159 			return 1;
2160 		}
2161 	}
2162 
2163 	return 0;
2164 }
2165 
2166 void kvm_sys_reg_table_init(void)
2167 {
2168 	unsigned int i;
2169 	struct sys_reg_desc clidr;
2170 
2171 	/* Make sure tables are unique and in order. */
2172 	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
2173 	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
2174 	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
2175 	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
2176 	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
2177 	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
2178 
2179 	/* We abuse the reset function to overwrite the table itself. */
2180 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
2181 		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
2182 
2183 	/*
2184 	 * CLIDR format is awkward, so clean it up.  See ARMv7 ARM B4.1.20:
2185 	 *
2186 	 *   If software reads the Cache Type fields from Ctype1
2187 	 *   upwards, once it has seen a value of 0b000, no caches
2188 	 *   exist at further-out levels of the hierarchy. So, for
2189 	 *   example, if Ctype3 is the first Cache Type field with a
2190 	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
2191 	 *   ignored.
2192 	 */
2193 	get_clidr_el1(NULL, &clidr); /* Ugly... */
2194 	cache_levels = clidr.val;
2195 	for (i = 0; i < 7; i++)
2196 		if (((cache_levels >> (i*3)) & 7) == 0)
2197 			break;
2198 	/* Clear all higher bits. */
2199 	cache_levels &= (1 << (i*3))-1;
2200 }
2201 
2202 /**
2203  * kvm_reset_sys_regs - sets system registers to their reset values
2204  * @vcpu: The VCPU pointer
2205  *
2206  * This function finds the right table above and sets the registers on the
2207  * virtual CPU struct to their architecturally defined reset values.
2208  */
2209 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2210 {
2211 	size_t num;
2212 	const struct sys_reg_desc *table;
2213 
2214 	/* Catch someone adding a register without putting in a reset entry. */
2215 	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
2216 
2217 	/* Generic chip reset first (so target could override). */
2218 	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2219 
2220 	table = get_target_table(vcpu->arch.target, true, &num);
2221 	reset_sys_reg_descs(vcpu, table, num);
2222 
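	/* Any register still holding the 0x42 poison pattern was never reset. */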
2223 	for (num = 1; num < NR_SYS_REGS; num++)
2224 		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
2225 			panic("Didn't reset vcpu_sys_reg(%zi)", num);
2226 }
2227