xref: /openbmc/linux/arch/arm64/kvm/sys_regs.c (revision a2cce7a9)
1 /*
2  * Copyright (C) 2012,2013 - ARM Ltd
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * Derived from arch/arm/kvm/coproc.c:
6  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7  * Authors: Rusty Russell <rusty@rustcorp.com.au>
8  *          Christoffer Dall <c.dall@virtualopensystems.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License, version 2, as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
21  */
22 
23 #include <linux/kvm_host.h>
24 #include <linux/mm.h>
25 #include <linux/uaccess.h>
26 
27 #include <asm/cacheflush.h>
28 #include <asm/cputype.h>
29 #include <asm/debug-monitors.h>
30 #include <asm/esr.h>
31 #include <asm/kvm_arm.h>
32 #include <asm/kvm_coproc.h>
33 #include <asm/kvm_emulate.h>
34 #include <asm/kvm_host.h>
35 #include <asm/kvm_mmu.h>
36 
37 #include <trace/events/kvm.h>
38 
39 #include "sys_regs.h"
40 
41 #include "trace.h"
42 
43 /*
44  * All of this file is extremely similar to the ARM coproc.c, but the
45  * types are different. My gut feeling is that it should be pretty
46  * easy to merge, but that would be an ABI breakage -- again. VFP
47  * would also need to be abstracted.
48  *
49  * For AArch32, we only take care of what is being trapped. Anything
50  * that has to do with init and userspace access has to go via the
51  * 64bit interface.
52  */
53 
54 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
55 static u32 cache_levels;
56 
57 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
58 #define CSSELR_MAX 12
59 
60 /* Which cache CCSIDR represents depends on CSSELR value. */
61 static u32 get_ccsidr(u32 csselr)
62 {
63 	u32 ccsidr;
64 
65 	/* Make sure no one else changes CSSELR during this! */
66 	local_irq_disable();
67 	/* Put value into CSSELR */
68 	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
69 	isb();
70 	/* Read result out of CCSIDR */
71 	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
72 	local_irq_enable();
73 
74 	return ccsidr;
75 }
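
/*
 * For reference, CCSIDR_EL1 then describes the geometry of whatever cache
 * CSSELR selected: LineSize in bits [2:0], (Associativity - 1) in [12:3]
 * and (NumSets - 1) in [27:13]. A minimal decoding sketch follows; this
 * helper is purely illustrative and is not used anywhere in this file.
 */
static inline u32 ccsidr_to_line_size(u32 ccsidr)
{
	/* LineSize = log2(words per line) - 2, so bytes = 2^(LineSize + 4) */
	return 1U << ((ccsidr & 0x7) + 4);
}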
76 
77 /*
78  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
79  */
80 static bool access_dcsw(struct kvm_vcpu *vcpu,
81 			const struct sys_reg_params *p,
82 			const struct sys_reg_desc *r)
83 {
84 	if (!p->is_write)
85 		return read_from_write_only(vcpu, p);
86 
87 	kvm_set_way_flush(vcpu);
88 	return true;
89 }
90 
91 /*
92  * Generic accessor for VM registers. Only called as long as HCR_TVM
93  * is set. If the guest enables the MMU, we stop trapping the VM
94  * sys_regs and leave it in complete control of the caches.
95  */
96 static bool access_vm_reg(struct kvm_vcpu *vcpu,
97 			  const struct sys_reg_params *p,
98 			  const struct sys_reg_desc *r)
99 {
100 	unsigned long val;
101 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
102 
103 	BUG_ON(!p->is_write);
104 
105 	val = *vcpu_reg(vcpu, p->Rt);
106 	if (!p->is_aarch32) {
107 		vcpu_sys_reg(vcpu, r->reg) = val;
108 	} else {
109 		if (!p->is_32bit)
110 			vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
111 		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
112 	}
113 
114 	kvm_toggle_cache(vcpu, was_enabled);
115 	return true;
116 }
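
/*
 * Note on the AArch32 path above: a 64 bit cp15 access (e.g. an mcrr to
 * TTBR0 via cp15_64_regs[]) arrives with is_aarch32 = true and
 * is_32bit = false, so both halves are stored: the low word through
 * vcpu_cp15_64_low() and the high word through vcpu_cp15_64_high().
 * A plain 32 bit mcr only stores the low word.
 */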
117 
118 /*
119  * Trap handler for the GICv3 SGI generation system register.
120  * Forward the request to the VGIC emulation.
121  * The cp15_64 code makes sure this automatically works
122  * for both AArch64 and AArch32 accesses.
123  */
124 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
125 			   const struct sys_reg_params *p,
126 			   const struct sys_reg_desc *r)
127 {
128 	u64 val;
129 
130 	if (!p->is_write)
131 		return read_from_write_only(vcpu, p);
132 
133 	val = *vcpu_reg(vcpu, p->Rt);
134 	vgic_v3_dispatch_sgi(vcpu, val);
135 
136 	return true;
137 }
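
/*
 * Example of what ends up in 'val' above: an AArch64 guest executing
 * "msr ICC_SGI1R_EL1, x0" with x0 = (1 << 24) | 0x1 asks for SGI 1 to be
 * sent to the VCPU whose MPIDR has Aff3.Aff2.Aff1 = 0.0.0 and whose Aff0
 * bit is set in the target list; vgic_v3_dispatch_sgi() does the actual
 * routing. (Field layout per the ICC_SGI1R_EL1 definition: TargetList in
 * bits [15:0], Aff1 in [23:16], INTID in [27:24], Aff2 in [39:32],
 * Aff3 in [55:48].)
 */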
138 
139 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
140 			const struct sys_reg_params *p,
141 			const struct sys_reg_desc *r)
142 {
143 	if (p->is_write)
144 		return ignore_write(vcpu, p);
145 	else
146 		return read_zero(vcpu, p);
147 }
148 
149 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
150 			   const struct sys_reg_params *p,
151 			   const struct sys_reg_desc *r)
152 {
153 	if (p->is_write) {
154 		return ignore_write(vcpu, p);
155 	} else {
156 		*vcpu_reg(vcpu, p->Rt) = (1 << 3);
157 		return true;
158 	}
159 }
160 
161 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
162 				   const struct sys_reg_params *p,
163 				   const struct sys_reg_desc *r)
164 {
165 	if (p->is_write) {
166 		return ignore_write(vcpu, p);
167 	} else {
168 		u32 val;
169 		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
170 		*vcpu_reg(vcpu, p->Rt) = val;
171 		return true;
172 	}
173 }
174 
175 /*
176  * We want to avoid world-switching all the DBG registers all the
177  * time:
178  *
179  * - If we've touched any debug register, it is likely that we're
180  *   going to touch more of them. It then makes sense to disable the
181  *   traps and start doing the save/restore dance
182  * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
183  *   then mandatory to save/restore the registers, as the guest
184  *   depends on them.
185  *
186  * For this, we use a DIRTY bit, indicating the guest has modified the
187  * debug registers, used as follows:
188  *
189  * On guest entry:
190  * - If the dirty bit is set (because we're coming back from trapping),
191  *   disable the traps, save host registers, restore guest registers.
192  * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
193  *   set the dirty bit, disable the traps, save host registers,
194  *   restore guest registers.
195  * - Otherwise, enable the traps
196  *
197  * On guest exit:
198  * - If the dirty bit is set, save guest registers, restore host
199  *   registers and clear the dirty bit. This ensures that the host can
200  *   now use the debug registers.
201  */
202 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
203 			    const struct sys_reg_params *p,
204 			    const struct sys_reg_desc *r)
205 {
206 	if (p->is_write) {
207 		vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
208 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
209 	} else {
210 		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
211 	}
212 
213 	trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
214 
215 	return true;
216 }
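
/*
 * The guest-entry decision described above boils down to something like
 * the sketch below. This is only an illustration of the policy; the real
 * check lives in the debug world-switch code, and the helper name here is
 * made up for this example.
 */
static inline bool __example_debug_state_is_live(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) ||
	       (vcpu_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE));
}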
217 
218 /*
219  * reg_to_dbg/dbg_to_reg
220  *
221  * A 32 bit write to a debug register leaves the top bits alone
222  * A 32 bit read from a debug register only returns the bottom bits
223  *
224  * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
225  * hyp.S code switches between host and guest values in the future.
226  */
227 static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
228 			      const struct sys_reg_params *p,
229 			      u64 *dbg_reg)
230 {
231 	u64 val = *vcpu_reg(vcpu, p->Rt);
232 
233 	if (p->is_32bit) {
234 		val &= 0xffffffffUL;
235 		val |= ((*dbg_reg >> 32) << 32);
236 	}
237 
238 	*dbg_reg = val;
239 	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
240 }
241 
242 static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
243 			      const struct sys_reg_params *p,
244 			      u64 *dbg_reg)
245 {
246 	u64 val = *dbg_reg;
247 
248 	if (p->is_32bit)
249 		val &= 0xffffffffUL;
250 
251 	*vcpu_reg(vcpu, p->Rt) = val;
252 }
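
/*
 * Worked example of the behaviour described above: with a saved value of
 * 0xdead0000_00000000, a 32 bit guest write of 0x12345678 leaves
 * 0xdead0000_12345678 in the shadow register, and a subsequent 32 bit
 * read returns only 0x12345678.
 */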
253 
254 static inline bool trap_bvr(struct kvm_vcpu *vcpu,
255 			    const struct sys_reg_params *p,
256 			    const struct sys_reg_desc *rd)
257 {
258 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
259 
260 	if (p->is_write)
261 		reg_to_dbg(vcpu, p, dbg_reg);
262 	else
263 		dbg_to_reg(vcpu, p, dbg_reg);
264 
265 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
266 
267 	return true;
268 }
269 
270 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
271 		const struct kvm_one_reg *reg, void __user *uaddr)
272 {
273 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
274 
275 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
276 		return -EFAULT;
277 	return 0;
278 }
279 
280 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
281 	const struct kvm_one_reg *reg, void __user *uaddr)
282 {
283 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
284 
285 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
286 		return -EFAULT;
287 	return 0;
288 }
289 
290 static inline void reset_bvr(struct kvm_vcpu *vcpu,
291 			     const struct sys_reg_desc *rd)
292 {
293 	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
294 }
295 
296 static inline bool trap_bcr(struct kvm_vcpu *vcpu,
297 			    const struct sys_reg_params *p,
298 			    const struct sys_reg_desc *rd)
299 {
300 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
301 
302 	if (p->is_write)
303 		reg_to_dbg(vcpu, p, dbg_reg);
304 	else
305 		dbg_to_reg(vcpu, p, dbg_reg);
306 
307 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
308 
309 	return true;
310 }
311 
312 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
313 		const struct kvm_one_reg *reg, void __user *uaddr)
314 {
315 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
316 
317 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
318 		return -EFAULT;
319 
320 	return 0;
321 }
322 
323 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
324 	const struct kvm_one_reg *reg, void __user *uaddr)
325 {
326 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
327 
328 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
329 		return -EFAULT;
330 	return 0;
331 }
332 
333 static inline void reset_bcr(struct kvm_vcpu *vcpu,
334 			     const struct sys_reg_desc *rd)
335 {
336 	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
337 }
338 
339 static inline bool trap_wvr(struct kvm_vcpu *vcpu,
340 			    const struct sys_reg_params *p,
341 			    const struct sys_reg_desc *rd)
342 {
343 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
344 
345 	if (p->is_write)
346 		reg_to_dbg(vcpu, p, dbg_reg);
347 	else
348 		dbg_to_reg(vcpu, p, dbg_reg);
349 
350 	trace_trap_reg(__func__, rd->reg, p->is_write,
351 		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
352 
353 	return true;
354 }
355 
356 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
357 		const struct kvm_one_reg *reg, void __user *uaddr)
358 {
359 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
360 
361 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
362 		return -EFAULT;
363 	return 0;
364 }
365 
366 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
367 	const struct kvm_one_reg *reg, void __user *uaddr)
368 {
369 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
370 
371 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
372 		return -EFAULT;
373 	return 0;
374 }
375 
376 static inline void reset_wvr(struct kvm_vcpu *vcpu,
377 			     const struct sys_reg_desc *rd)
378 {
379 	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
380 }
381 
382 static inline bool trap_wcr(struct kvm_vcpu *vcpu,
383 			    const struct sys_reg_params *p,
384 			    const struct sys_reg_desc *rd)
385 {
386 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
387 
388 	if (p->is_write)
389 		reg_to_dbg(vcpu, p, dbg_reg);
390 	else
391 		dbg_to_reg(vcpu, p, dbg_reg);
392 
393 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
394 
395 	return true;
396 }
397 
398 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
399 		const struct kvm_one_reg *reg, void __user *uaddr)
400 {
401 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
402 
403 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
404 		return -EFAULT;
405 	return 0;
406 }
407 
408 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
409 	const struct kvm_one_reg *reg, void __user *uaddr)
410 {
411 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
412 
413 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
414 		return -EFAULT;
415 	return 0;
416 }
417 
418 static inline void reset_wcr(struct kvm_vcpu *vcpu,
419 			     const struct sys_reg_desc *rd)
420 {
421 	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
422 }
423 
424 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
425 {
426 	u64 amair;
427 
428 	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
429 	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
430 }
431 
432 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
433 {
434 	u64 mpidr;
435 
436 	/*
437 	 * Map the vcpu_id into the first three affinity level fields of
438 	 * the MPIDR. We limit the number of VCPUs at affinity level 0 to
439 	 * 16, because the ICC_SGIxR registers of the GICv3 can only
440 	 * address 16 CPUs within a level 0 group when sending IPIs; the
441 	 * higher vcpu_id bits spill over into affinity levels 1 and 2.
442 	 */
443 	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
444 	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
445 	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
446 	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
447 }
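
/*
 * Worked example of the mapping above: vcpu_id 33 (0x21) gives Aff0 = 1,
 * Aff1 = 2 and Aff2 = 0, so the guest reads MPIDR_EL1 as
 * (1UL << 31) | (2 << 8) | 1 = 0x80000201.
 */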
448 
449 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
450 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
451 	/* DBGBVRn_EL1 */						\
452 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
453 	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
454 	/* DBGBCRn_EL1 */						\
455 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
456 	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
457 	/* DBGWVRn_EL1 */						\
458 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
459 	  trap_wvr, reset_wvr, n, 0,  get_wvr, set_wvr },		\
460 	/* DBGWCRn_EL1 */						\
461 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
462 	  trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
463 
464 /*
465  * Architected system registers.
466  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
467  *
468  * We could trap ID_DFR0 and tell the guest we don't support performance
469  * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
470  * NAKed, so it will read the PMCR anyway.
471  *
472  * Therefore we tell the guest we have 0 counters.  Unfortunately, we
473  * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
474  * all PM registers, which doesn't crash the guest kernel at least.
475  *
476  * Debug handling: We do trap most, if not all, debug related system
477  * registers. The implementation is good enough to ensure that a guest
478  * can use these with minimal performance degradation. The drawback is
479  * that we don't implement any of the external debug interface, nor the
480  * OSLock protocol. This should be revisited if we ever encounter a
481  * more demanding guest...
482  */
483 static const struct sys_reg_desc sys_reg_descs[] = {
484 	/* DC ISW */
485 	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
486 	  access_dcsw },
487 	/* DC CSW */
488 	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
489 	  access_dcsw },
490 	/* DC CISW */
491 	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
492 	  access_dcsw },
493 
494 	DBG_BCR_BVR_WCR_WVR_EL1(0),
495 	DBG_BCR_BVR_WCR_WVR_EL1(1),
496 	/* MDCCINT_EL1 */
497 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
498 	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
499 	/* MDSCR_EL1 */
500 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
501 	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
502 	DBG_BCR_BVR_WCR_WVR_EL1(2),
503 	DBG_BCR_BVR_WCR_WVR_EL1(3),
504 	DBG_BCR_BVR_WCR_WVR_EL1(4),
505 	DBG_BCR_BVR_WCR_WVR_EL1(5),
506 	DBG_BCR_BVR_WCR_WVR_EL1(6),
507 	DBG_BCR_BVR_WCR_WVR_EL1(7),
508 	DBG_BCR_BVR_WCR_WVR_EL1(8),
509 	DBG_BCR_BVR_WCR_WVR_EL1(9),
510 	DBG_BCR_BVR_WCR_WVR_EL1(10),
511 	DBG_BCR_BVR_WCR_WVR_EL1(11),
512 	DBG_BCR_BVR_WCR_WVR_EL1(12),
513 	DBG_BCR_BVR_WCR_WVR_EL1(13),
514 	DBG_BCR_BVR_WCR_WVR_EL1(14),
515 	DBG_BCR_BVR_WCR_WVR_EL1(15),
516 
517 	/* MDRAR_EL1 */
518 	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
519 	  trap_raz_wi },
520 	/* OSLAR_EL1 */
521 	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
522 	  trap_raz_wi },
523 	/* OSLSR_EL1 */
524 	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
525 	  trap_oslsr_el1 },
526 	/* OSDLR_EL1 */
527 	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
528 	  trap_raz_wi },
529 	/* DBGPRCR_EL1 */
530 	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
531 	  trap_raz_wi },
532 	/* DBGCLAIMSET_EL1 */
533 	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
534 	  trap_raz_wi },
535 	/* DBGCLAIMCLR_EL1 */
536 	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
537 	  trap_raz_wi },
538 	/* DBGAUTHSTATUS_EL1 */
539 	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
540 	  trap_dbgauthstatus_el1 },
541 
542 	/* MDCCSR_EL1 */
543 	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
544 	  trap_raz_wi },
545 	/* DBGDTR_EL0 */
546 	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
547 	  trap_raz_wi },
548 	/* DBGDTR[TR]X_EL0 */
549 	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
550 	  trap_raz_wi },
551 
552 	/* DBGVCR32_EL2 */
553 	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
554 	  NULL, reset_val, DBGVCR32_EL2, 0 },
555 
556 	/* MPIDR_EL1 */
557 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
558 	  NULL, reset_mpidr, MPIDR_EL1 },
559 	/* SCTLR_EL1 */
560 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
561 	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
562 	/* CPACR_EL1 */
563 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
564 	  NULL, reset_val, CPACR_EL1, 0 },
565 	/* TTBR0_EL1 */
566 	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
567 	  access_vm_reg, reset_unknown, TTBR0_EL1 },
568 	/* TTBR1_EL1 */
569 	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
570 	  access_vm_reg, reset_unknown, TTBR1_EL1 },
571 	/* TCR_EL1 */
572 	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
573 	  access_vm_reg, reset_val, TCR_EL1, 0 },
574 
575 	/* AFSR0_EL1 */
576 	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
577 	  access_vm_reg, reset_unknown, AFSR0_EL1 },
578 	/* AFSR1_EL1 */
579 	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
580 	  access_vm_reg, reset_unknown, AFSR1_EL1 },
581 	/* ESR_EL1 */
582 	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
583 	  access_vm_reg, reset_unknown, ESR_EL1 },
584 	/* FAR_EL1 */
585 	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
586 	  access_vm_reg, reset_unknown, FAR_EL1 },
587 	/* PAR_EL1 */
588 	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
589 	  NULL, reset_unknown, PAR_EL1 },
590 
591 	/* PMINTENSET_EL1 */
592 	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
593 	  trap_raz_wi },
594 	/* PMINTENCLR_EL1 */
595 	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
596 	  trap_raz_wi },
597 
598 	/* MAIR_EL1 */
599 	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
600 	  access_vm_reg, reset_unknown, MAIR_EL1 },
601 	/* AMAIR_EL1 */
602 	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
603 	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },
604 
605 	/* VBAR_EL1 */
606 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
607 	  NULL, reset_val, VBAR_EL1, 0 },
608 
609 	/* ICC_SGI1R_EL1 */
610 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
611 	  access_gic_sgi },
612 	/* ICC_SRE_EL1 */
613 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
614 	  trap_raz_wi },
615 
616 	/* CONTEXTIDR_EL1 */
617 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
618 	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
619 	/* TPIDR_EL1 */
620 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
621 	  NULL, reset_unknown, TPIDR_EL1 },
622 
623 	/* CNTKCTL_EL1 */
624 	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
625 	  NULL, reset_val, CNTKCTL_EL1, 0},
626 
627 	/* CSSELR_EL1 */
628 	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
629 	  NULL, reset_unknown, CSSELR_EL1 },
630 
631 	/* PMCR_EL0 */
632 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
633 	  trap_raz_wi },
634 	/* PMCNTENSET_EL0 */
635 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
636 	  trap_raz_wi },
637 	/* PMCNTENCLR_EL0 */
638 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
639 	  trap_raz_wi },
640 	/* PMOVSCLR_EL0 */
641 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
642 	  trap_raz_wi },
643 	/* PMSWINC_EL0 */
644 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
645 	  trap_raz_wi },
646 	/* PMSELR_EL0 */
647 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
648 	  trap_raz_wi },
649 	/* PMCEID0_EL0 */
650 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
651 	  trap_raz_wi },
652 	/* PMCEID1_EL0 */
653 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
654 	  trap_raz_wi },
655 	/* PMCCNTR_EL0 */
656 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
657 	  trap_raz_wi },
658 	/* PMXEVTYPER_EL0 */
659 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
660 	  trap_raz_wi },
661 	/* PMXEVCNTR_EL0 */
662 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
663 	  trap_raz_wi },
664 	/* PMUSERENR_EL0 */
665 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
666 	  trap_raz_wi },
667 	/* PMOVSSET_EL0 */
668 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
669 	  trap_raz_wi },
670 
671 	/* TPIDR_EL0 */
672 	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
673 	  NULL, reset_unknown, TPIDR_EL0 },
674 	/* TPIDRRO_EL0 */
675 	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
676 	  NULL, reset_unknown, TPIDRRO_EL0 },
677 
678 	/* DACR32_EL2 */
679 	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
680 	  NULL, reset_unknown, DACR32_EL2 },
681 	/* IFSR32_EL2 */
682 	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
683 	  NULL, reset_unknown, IFSR32_EL2 },
684 	/* FPEXC32_EL2 */
685 	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
686 	  NULL, reset_val, FPEXC32_EL2, 0x70 },
687 };
688 
689 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
690 			const struct sys_reg_params *p,
691 			const struct sys_reg_desc *r)
692 {
693 	if (p->is_write) {
694 		return ignore_write(vcpu, p);
695 	} else {
696 		u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
697 		u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
698 		u32 el3 = !!((pfr >> 12) & 0xf);
699 
700 		*vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
701 					  (((dfr >> 12) & 0xf) << 24) |
702 					  (((dfr >> 28) & 0xf) << 20) |
703 					  (6 << 16) | (el3 << 14) | (el3 << 12));
704 		return true;
705 	}
706 }
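
/*
 * The synthesized DBGIDR value above packs, in the AArch32 layout:
 *   bits [31:28]  WRPs      taken from ID_AA64DFR0_EL1[23:20]
 *   bits [27:24]  BRPs      taken from ID_AA64DFR0_EL1[15:12]
 *   bits [23:20]  CTX_CMPs  taken from ID_AA64DFR0_EL1[31:28]
 *   bits [19:16]  Version = 6 (ARMv8 debug architecture)
 *   bits 14 and 12 set when EL3 is implemented (ID_AA64PFR0_EL1.EL3).
 */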
707 
708 static bool trap_debug32(struct kvm_vcpu *vcpu,
709 			 const struct sys_reg_params *p,
710 			 const struct sys_reg_desc *r)
711 {
712 	if (p->is_write) {
713 		vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
714 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
715 	} else {
716 		*vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
717 	}
718 
719 	return true;
720 }
721 
722 /* AArch32 debug register mappings
723  *
724  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
725  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
726  *
727  * All control registers and watchpoint value registers are mapped to
728  * the lower 32 bits of their AArch64 equivalents. We share the trap
729  * handlers with the above AArch64 code which checks what mode the
730  * system is in.
731  */
732 
733 static inline bool trap_xvr(struct kvm_vcpu *vcpu,
734 			    const struct sys_reg_params *p,
735 			    const struct sys_reg_desc *rd)
736 {
737 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
738 
739 	if (p->is_write) {
740 		u64 val = *dbg_reg;
741 
742 		val &= 0xffffffffUL;
743 		val |= *vcpu_reg(vcpu, p->Rt) << 32;
744 		*dbg_reg = val;
745 
746 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
747 	} else {
748 		*vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
749 	}
750 
751 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
752 
753 	return true;
754 }
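
/*
 * Example of the mapping above: an AArch32 write of 0xffff0000 to DBGBXVR1
 * while DBGBVR1_EL1 holds 0x00000000_8badf00d leaves 0xffff0000_8badf00d
 * in the 64 bit shadow register; the AArch32 DBGBVR1 view (trap_bvr with
 * is_32bit set) still reads back 0x8badf00d.
 */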
755 
756 #define DBG_BCR_BVR_WCR_WVR(n)						\
757 	/* DBGBVRn */							\
758 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, 	\
759 	/* DBGBCRn */							\
760 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
761 	/* DBGWVRn */							\
762 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
763 	/* DBGWCRn */							\
764 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
765 
766 #define DBGBXVR(n)							\
767 	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
768 
769 /*
770  * Trapped cp14 registers. We generally ignore most of the external
771  * debug registers, on the principle that they don't really make sense to a
772  * guest. Revisit this one day, should this principle change.
773  */
774 static const struct sys_reg_desc cp14_regs[] = {
775 	/* DBGIDR */
776 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
777 	/* DBGDTRRXext */
778 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
779 
780 	DBG_BCR_BVR_WCR_WVR(0),
781 	/* DBGDSCRint */
782 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
783 	DBG_BCR_BVR_WCR_WVR(1),
784 	/* DBGDCCINT */
785 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
786 	/* DBGDSCRext */
787 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
788 	DBG_BCR_BVR_WCR_WVR(2),
789 	/* DBGDTR[RT]Xint */
790 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
791 	/* DBGDTR[RT]Xext */
792 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
793 	DBG_BCR_BVR_WCR_WVR(3),
794 	DBG_BCR_BVR_WCR_WVR(4),
795 	DBG_BCR_BVR_WCR_WVR(5),
796 	/* DBGWFAR */
797 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
798 	/* DBGOSECCR */
799 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
800 	DBG_BCR_BVR_WCR_WVR(6),
801 	/* DBGVCR */
802 	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
803 	DBG_BCR_BVR_WCR_WVR(7),
804 	DBG_BCR_BVR_WCR_WVR(8),
805 	DBG_BCR_BVR_WCR_WVR(9),
806 	DBG_BCR_BVR_WCR_WVR(10),
807 	DBG_BCR_BVR_WCR_WVR(11),
808 	DBG_BCR_BVR_WCR_WVR(12),
809 	DBG_BCR_BVR_WCR_WVR(13),
810 	DBG_BCR_BVR_WCR_WVR(14),
811 	DBG_BCR_BVR_WCR_WVR(15),
812 
813 	/* DBGDRAR (32bit) */
814 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
815 
816 	DBGBXVR(0),
817 	/* DBGOSLAR */
818 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
819 	DBGBXVR(1),
820 	/* DBGOSLSR */
821 	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
822 	DBGBXVR(2),
823 	DBGBXVR(3),
824 	/* DBGOSDLR */
825 	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
826 	DBGBXVR(4),
827 	/* DBGPRCR */
828 	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
829 	DBGBXVR(5),
830 	DBGBXVR(6),
831 	DBGBXVR(7),
832 	DBGBXVR(8),
833 	DBGBXVR(9),
834 	DBGBXVR(10),
835 	DBGBXVR(11),
836 	DBGBXVR(12),
837 	DBGBXVR(13),
838 	DBGBXVR(14),
839 	DBGBXVR(15),
840 
841 	/* DBGDSAR (32bit) */
842 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
843 
844 	/* DBGDEVID2 */
845 	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
846 	/* DBGDEVID1 */
847 	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
848 	/* DBGDEVID */
849 	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
850 	/* DBGCLAIMSET */
851 	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
852 	/* DBGCLAIMCLR */
853 	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
854 	/* DBGAUTHSTATUS */
855 	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
856 };
857 
858 /* Trapped cp14 64bit registers */
859 static const struct sys_reg_desc cp14_64_regs[] = {
860 	/* DBGDRAR (64bit) */
861 	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
862 
863 	/* DBGDSAR (64bit) */
864 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
865 };
866 
867 /*
868  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
869  * depending on the way they are accessed (as a 32bit or a 64bit
870  * register).
871  */
872 static const struct sys_reg_desc cp15_regs[] = {
873 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
874 
875 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
876 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
877 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
878 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
879 	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
880 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
881 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
882 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
883 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
884 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
885 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
886 
887 	/*
888 	 * DC{C,I,CI}SW operations:
889 	 */
890 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
891 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
892 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
893 
894 	/* PMU */
895 	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
896 	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
897 	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
898 	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
899 	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
900 	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
901 	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
902 	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
903 	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
904 	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
905 	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
906 	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
907 	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
908 
909 	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
910 	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
911 	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
912 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
913 
914 	/* ICC_SRE */
915 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
916 
917 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
918 };
919 
920 static const struct sys_reg_desc cp15_64_regs[] = {
921 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
922 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
923 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
924 };
925 
926 /* Target specific emulation tables */
927 static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
928 
929 void kvm_register_target_sys_reg_table(unsigned int target,
930 				       struct kvm_sys_reg_target_table *table)
931 {
932 	target_tables[target] = table;
933 }
934 
935 /* Get specific register table for this target. */
936 static const struct sys_reg_desc *get_target_table(unsigned target,
937 						   bool mode_is_64,
938 						   size_t *num)
939 {
940 	struct kvm_sys_reg_target_table *table;
941 
942 	table = target_tables[target];
943 	if (mode_is_64) {
944 		*num = table->table64.num;
945 		return table->table64.table;
946 	} else {
947 		*num = table->table32.num;
948 		return table->table32.table;
949 	}
950 }
951 
952 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
953 					 const struct sys_reg_desc table[],
954 					 unsigned int num)
955 {
956 	unsigned int i;
957 
958 	for (i = 0; i < num; i++) {
959 		const struct sys_reg_desc *r = &table[i];
960 
961 		if (params->Op0 != r->Op0)
962 			continue;
963 		if (params->Op1 != r->Op1)
964 			continue;
965 		if (params->CRn != r->CRn)
966 			continue;
967 		if (params->CRm != r->CRm)
968 			continue;
969 		if (params->Op2 != r->Op2)
970 			continue;
971 
972 		return r;
973 	}
974 	return NULL;
975 }
976 
977 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
978 {
979 	kvm_inject_undefined(vcpu);
980 	return 1;
981 }
982 
983 /*
984  * emulate_cp --  tries to match a sys_reg access in a handling table, and
985  *                call the corresponding trap handler.
986  *
987  * @params: pointer to the descriptor of the access
988  * @table: array of trap descriptors
989  * @num: size of the trap descriptor array
990  *
991  * Return 0 if the access has been handled, and -1 if not.
992  */
993 static int emulate_cp(struct kvm_vcpu *vcpu,
994 		      const struct sys_reg_params *params,
995 		      const struct sys_reg_desc *table,
996 		      size_t num)
997 {
998 	const struct sys_reg_desc *r;
999 
1000 	if (!table)
1001 		return -1;	/* Not handled */
1002 
1003 	r = find_reg(params, table, num);
1004 
1005 	if (r) {
1006 		/*
1007 		 * Not having an accessor means that we have
1008 		 * configured a trap that we don't know how to
1009 		 * handle. This certainly qualifies as a gross bug
1010 		 * that should be fixed right away.
1011 		 */
1012 		BUG_ON(!r->access);
1013 
1014 		if (likely(r->access(vcpu, params, r))) {
1015 			/* Skip instruction, since it was emulated */
1016 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1017 		}
1018 
1019 		/* Handled */
1020 		return 0;
1021 	}
1022 
1023 	/* Not handled */
1024 	return -1;
1025 }
1026 
1027 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
1028 				struct sys_reg_params *params)
1029 {
1030 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
1031 	int cp;
1032 
1033 	switch(hsr_ec) {
1034 	case ESR_ELx_EC_CP15_32:
1035 	case ESR_ELx_EC_CP15_64:
1036 		cp = 15;
1037 		break;
1038 	case ESR_ELx_EC_CP14_MR:
1039 	case ESR_ELx_EC_CP14_64:
1040 		cp = 14;
1041 		break;
1042 	default:
1043 		WARN_ON((cp = -1));
1044 	}
1045 
1046 	kvm_err("Unsupported guest CP%d access at: %08lx\n",
1047 		cp, *vcpu_pc(vcpu));
1048 	print_sys_reg_instr(params);
1049 	kvm_inject_undefined(vcpu);
1050 }
1051 
1052 /**
1053  * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
1054  * @vcpu: The VCPU pointer
1055  * @global, @target_specific: trap descriptor tables (and their sizes) to search
1056  */
1057 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1058 			    const struct sys_reg_desc *global,
1059 			    size_t nr_global,
1060 			    const struct sys_reg_desc *target_specific,
1061 			    size_t nr_specific)
1062 {
1063 	struct sys_reg_params params;
1064 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
1065 	int Rt2 = (hsr >> 10) & 0xf;
1066 
1067 	params.is_aarch32 = true;
1068 	params.is_32bit = false;
1069 	params.CRm = (hsr >> 1) & 0xf;
1070 	params.Rt = (hsr >> 5) & 0xf;
1071 	params.is_write = ((hsr & 1) == 0);
1072 
1073 	params.Op0 = 0;
1074 	params.Op1 = (hsr >> 16) & 0xf;
1075 	params.Op2 = 0;
1076 	params.CRn = 0;
1077 
1078 	/*
1079 	 * Massive hack here. Store Rt2 in the top 32 bits so we only
1080 	 * have one register to deal with. As we use the same trap
1081 	 * backends between AArch32 and AArch64, we get away with it.
1082 	 */
1083 	if (params.is_write) {
1084 		u64 val = *vcpu_reg(vcpu, params.Rt);
1085 		val &= 0xffffffff;
1086 		val |= *vcpu_reg(vcpu, Rt2) << 32;
1087 		*vcpu_reg(vcpu, params.Rt) = val;
1088 	}
1089 
1090 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
1091 		goto out;
1092 	if (!emulate_cp(vcpu, &params, global, nr_global))
1093 		goto out;
1094 
1095 	unhandled_cp_access(vcpu, &params);
1096 
1097 out:
1098 	/* Do the opposite hack for the read side */
1099 	if (!params.is_write) {
1100 		u64 val = *vcpu_reg(vcpu, params.Rt);
1101 		val >>= 32;
1102 		*vcpu_reg(vcpu, Rt2) = val;
1103 	}
1104 
1105 	return 1;
1106 }
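
/*
 * Example of the Rt/Rt2 packing above: for "mcrr p15, 0, r2, r3, c2"
 * (a 64 bit TTBR0 write), the HSR gives Rt = 2 and Rt2 = 3, so the trap
 * handler sees the single 64 bit value (r3 << 32) | r2, exactly as an
 * AArch64 guest writing TTBR0_EL1 would; the read path undoes the trick
 * for mrrc.
 */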
1107 
1108 /**
1109  * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
1110  * @vcpu: The VCPU pointer
1111  * @global, @target_specific: trap descriptor tables (and their sizes) to search
1112  */
1113 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
1114 			    const struct sys_reg_desc *global,
1115 			    size_t nr_global,
1116 			    const struct sys_reg_desc *target_specific,
1117 			    size_t nr_specific)
1118 {
1119 	struct sys_reg_params params;
1120 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
1121 
1122 	params.is_aarch32 = true;
1123 	params.is_32bit = true;
1124 	params.CRm = (hsr >> 1) & 0xf;
1125 	params.Rt  = (hsr >> 5) & 0xf;
1126 	params.is_write = ((hsr & 1) == 0);
1127 	params.CRn = (hsr >> 10) & 0xf;
1128 	params.Op0 = 0;
1129 	params.Op1 = (hsr >> 14) & 0x7;
1130 	params.Op2 = (hsr >> 17) & 0x7;
1131 
1132 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
1133 		return 1;
1134 	if (!emulate_cp(vcpu, &params, global, nr_global))
1135 		return 1;
1136 
1137 	unhandled_cp_access(vcpu, &params);
1138 	return 1;
1139 }
1140 
1141 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
1142 {
1143 	const struct sys_reg_desc *target_specific;
1144 	size_t num;
1145 
1146 	target_specific = get_target_table(vcpu->arch.target, false, &num);
1147 	return kvm_handle_cp_64(vcpu,
1148 				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
1149 				target_specific, num);
1150 }
1151 
1152 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1153 {
1154 	const struct sys_reg_desc *target_specific;
1155 	size_t num;
1156 
1157 	target_specific = get_target_table(vcpu->arch.target, false, &num);
1158 	return kvm_handle_cp_32(vcpu,
1159 				cp15_regs, ARRAY_SIZE(cp15_regs),
1160 				target_specific, num);
1161 }
1162 
1163 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
1164 {
1165 	return kvm_handle_cp_64(vcpu,
1166 				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
1167 				NULL, 0);
1168 }
1169 
1170 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1171 {
1172 	return kvm_handle_cp_32(vcpu,
1173 				cp14_regs, ARRAY_SIZE(cp14_regs),
1174 				NULL, 0);
1175 }
1176 
1177 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
1178 			   const struct sys_reg_params *params)
1179 {
1180 	size_t num;
1181 	const struct sys_reg_desc *table, *r;
1182 
1183 	table = get_target_table(vcpu->arch.target, true, &num);
1184 
1185 	/* Search target-specific then generic table. */
1186 	r = find_reg(params, table, num);
1187 	if (!r)
1188 		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
1189 
1190 	if (likely(r)) {
1191 		/*
1192 		 * Not having an accessor means that we have
1193 		 * configured a trap that we don't know how to
1194 		 * handle. This certainly qualifies as a gross bug
1195 		 * that should be fixed right away.
1196 		 */
1197 		BUG_ON(!r->access);
1198 
1199 		if (likely(r->access(vcpu, params, r))) {
1200 			/* Skip instruction, since it was emulated */
1201 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1202 			return 1;
1203 		}
1204 		/* If access function fails, it should complain. */
1205 	} else {
1206 		kvm_err("Unsupported guest sys_reg access at: %lx\n",
1207 			*vcpu_pc(vcpu));
1208 		print_sys_reg_instr(params);
1209 	}
1210 	kvm_inject_undefined(vcpu);
1211 	return 1;
1212 }
1213 
1214 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
1215 			      const struct sys_reg_desc *table, size_t num)
1216 {
1217 	unsigned long i;
1218 
1219 	for (i = 0; i < num; i++)
1220 		if (table[i].reset)
1221 			table[i].reset(vcpu, &table[i]);
1222 }
1223 
1224 /**
1225  * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
1226  * @vcpu: The VCPU pointer
1227  * @run:  The kvm_run struct
1228  */
1229 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
1230 {
1231 	struct sys_reg_params params;
1232 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
1233 
1234 	trace_kvm_handle_sys_reg(esr);
1235 
1236 	params.is_aarch32 = false;
1237 	params.is_32bit = false;
1238 	params.Op0 = (esr >> 20) & 3;
1239 	params.Op1 = (esr >> 14) & 0x7;
1240 	params.CRn = (esr >> 10) & 0xf;
1241 	params.CRm = (esr >> 1) & 0xf;
1242 	params.Op2 = (esr >> 17) & 0x7;
1243 	params.Rt = (esr >> 5) & 0x1f;
1244 	params.is_write = !(esr & 1);
1245 
1246 	return emulate_sys_reg(vcpu, &params);
1247 }
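
/*
 * Worked example of the ISS decode above: for a trapped
 * "msr sctlr_el1, x3" (with HCR_EL2.TVM set), the ESR yields Op0 = 3,
 * Op1 = 0, CRn = 1, CRm = 0, Op2 = 0, Rt = 3 and is_write = true, which
 * matches the SCTLR_EL1 entry in sys_reg_descs[] and is handled by
 * access_vm_reg().
 */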
1248 
1249 /******************************************************************************
1250  * Userspace API
1251  *****************************************************************************/
1252 
1253 static bool index_to_params(u64 id, struct sys_reg_params *params)
1254 {
1255 	switch (id & KVM_REG_SIZE_MASK) {
1256 	case KVM_REG_SIZE_U64:
1257 		/* If any unused index bits are set, it's not valid. */
1258 		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
1259 			      | KVM_REG_ARM_COPROC_MASK
1260 			      | KVM_REG_ARM64_SYSREG_OP0_MASK
1261 			      | KVM_REG_ARM64_SYSREG_OP1_MASK
1262 			      | KVM_REG_ARM64_SYSREG_CRN_MASK
1263 			      | KVM_REG_ARM64_SYSREG_CRM_MASK
1264 			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
1265 			return false;
1266 		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
1267 			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
1268 		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
1269 			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
1270 		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
1271 			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
1272 		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
1273 			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
1274 		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
1275 			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
1276 		return true;
1277 	default:
1278 		return false;
1279 	}
1280 }
1281 
1282 /* Decode an index value, and find the sys_reg_desc entry. */
1283 static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
1284 						    u64 id)
1285 {
1286 	size_t num;
1287 	const struct sys_reg_desc *table, *r;
1288 	struct sys_reg_params params;
1289 
1290 	/* We only do sys_reg for now. */
1291 	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
1292 		return NULL;
1293 
1294 	if (!index_to_params(id, &params))
1295 		return NULL;
1296 
1297 	table = get_target_table(vcpu->arch.target, true, &num);
1298 	r = find_reg(&params, table, num);
1299 	if (!r)
1300 		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
1301 
1302 	/* Not saved in the sys_reg array? */
1303 	if (r && !r->reg)
1304 		r = NULL;
1305 
1306 	return r;
1307 }
1308 
1309 /*
1310  * These are the invariant sys_reg registers: we let the guest see the
1311  * host versions of these, so they're part of the guest state.
1312  *
1313  * A future CPU may provide a mechanism to present different values to
1314  * the guest, or a future kvm may trap them.
1315  */
1316 
1317 #define FUNCTION_INVARIANT(reg)						\
1318 	static void get_##reg(struct kvm_vcpu *v,			\
1319 			      const struct sys_reg_desc *r)		\
1320 	{								\
1321 		u64 val;						\
1322 									\
1323 		asm volatile("mrs %0, " __stringify(reg) "\n"		\
1324 			     : "=r" (val));				\
1325 		((struct sys_reg_desc *)r)->val = val;			\
1326 	}
1327 
1328 FUNCTION_INVARIANT(midr_el1)
1329 FUNCTION_INVARIANT(ctr_el0)
1330 FUNCTION_INVARIANT(revidr_el1)
1331 FUNCTION_INVARIANT(id_pfr0_el1)
1332 FUNCTION_INVARIANT(id_pfr1_el1)
1333 FUNCTION_INVARIANT(id_dfr0_el1)
1334 FUNCTION_INVARIANT(id_afr0_el1)
1335 FUNCTION_INVARIANT(id_mmfr0_el1)
1336 FUNCTION_INVARIANT(id_mmfr1_el1)
1337 FUNCTION_INVARIANT(id_mmfr2_el1)
1338 FUNCTION_INVARIANT(id_mmfr3_el1)
1339 FUNCTION_INVARIANT(id_isar0_el1)
1340 FUNCTION_INVARIANT(id_isar1_el1)
1341 FUNCTION_INVARIANT(id_isar2_el1)
1342 FUNCTION_INVARIANT(id_isar3_el1)
1343 FUNCTION_INVARIANT(id_isar4_el1)
1344 FUNCTION_INVARIANT(id_isar5_el1)
1345 FUNCTION_INVARIANT(clidr_el1)
1346 FUNCTION_INVARIANT(aidr_el1)
1347 
1348 /* ->val is filled in by kvm_sys_reg_table_init() */
1349 static struct sys_reg_desc invariant_sys_regs[] = {
1350 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
1351 	  NULL, get_midr_el1 },
1352 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
1353 	  NULL, get_revidr_el1 },
1354 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
1355 	  NULL, get_id_pfr0_el1 },
1356 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
1357 	  NULL, get_id_pfr1_el1 },
1358 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
1359 	  NULL, get_id_dfr0_el1 },
1360 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
1361 	  NULL, get_id_afr0_el1 },
1362 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
1363 	  NULL, get_id_mmfr0_el1 },
1364 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
1365 	  NULL, get_id_mmfr1_el1 },
1366 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
1367 	  NULL, get_id_mmfr2_el1 },
1368 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
1369 	  NULL, get_id_mmfr3_el1 },
1370 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
1371 	  NULL, get_id_isar0_el1 },
1372 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
1373 	  NULL, get_id_isar1_el1 },
1374 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
1375 	  NULL, get_id_isar2_el1 },
1376 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
1377 	  NULL, get_id_isar3_el1 },
1378 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
1379 	  NULL, get_id_isar4_el1 },
1380 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
1381 	  NULL, get_id_isar5_el1 },
1382 	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
1383 	  NULL, get_clidr_el1 },
1384 	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
1385 	  NULL, get_aidr_el1 },
1386 	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
1387 	  NULL, get_ctr_el0 },
1388 };
1389 
1390 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
1391 {
1392 	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
1393 		return -EFAULT;
1394 	return 0;
1395 }
1396 
1397 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
1398 {
1399 	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
1400 		return -EFAULT;
1401 	return 0;
1402 }
1403 
1404 static int get_invariant_sys_reg(u64 id, void __user *uaddr)
1405 {
1406 	struct sys_reg_params params;
1407 	const struct sys_reg_desc *r;
1408 
1409 	if (!index_to_params(id, &params))
1410 		return -ENOENT;
1411 
1412 	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
1413 	if (!r)
1414 		return -ENOENT;
1415 
1416 	return reg_to_user(uaddr, &r->val, id);
1417 }
1418 
1419 static int set_invariant_sys_reg(u64 id, void __user *uaddr)
1420 {
1421 	struct sys_reg_params params;
1422 	const struct sys_reg_desc *r;
1423 	int err;
1424 	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
1425 
1426 	if (!index_to_params(id, &params))
1427 		return -ENOENT;
1428 	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
1429 	if (!r)
1430 		return -ENOENT;
1431 
1432 	err = reg_from_user(&val, uaddr, id);
1433 	if (err)
1434 		return err;
1435 
1436 	/* This is what we mean by invariant: you can't change it. */
1437 	if (r->val != val)
1438 		return -EINVAL;
1439 
1440 	return 0;
1441 }
1442 
1443 static bool is_valid_cache(u32 val)
1444 {
1445 	u32 level, ctype;
1446 
1447 	if (val >= CSSELR_MAX)
1448 		return false;
1449 
1450 	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
1451 	level = (val >> 1);
1452 	ctype = (cache_levels >> (level * 3)) & 7;
1453 
1454 	switch (ctype) {
1455 	case 0: /* No cache */
1456 		return false;
1457 	case 1: /* Instruction cache only */
1458 		return (val & 1);
1459 	case 2: /* Data cache only */
1460 	case 4: /* Unified cache */
1461 		return !(val & 1);
1462 	case 3: /* Separate instruction and data caches */
1463 		return true;
1464 	default: /* Reserved: we can't know instruction or data. */
1465 		return false;
1466 	}
1467 }
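
/*
 * Example: with cache_levels = 0x23 (Ctype1 = 0b011, separate L1 I/D
 * caches; Ctype2 = 0b100, unified L2), the valid CSSELR values are
 * 0 (L1 data), 1 (L1 instruction) and 2 (L2), while 3 (an "L2
 * instruction" cache) is rejected since a unified cache has no I side.
 */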
1468 
1469 static int demux_c15_get(u64 id, void __user *uaddr)
1470 {
1471 	u32 val;
1472 	u32 __user *uval = uaddr;
1473 
1474 	/* Fail if we have unknown bits set. */
1475 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
1476 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
1477 		return -ENOENT;
1478 
1479 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
1480 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
1481 		if (KVM_REG_SIZE(id) != 4)
1482 			return -ENOENT;
1483 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
1484 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
1485 		if (!is_valid_cache(val))
1486 			return -ENOENT;
1487 
1488 		return put_user(get_ccsidr(val), uval);
1489 	default:
1490 		return -ENOENT;
1491 	}
1492 }
1493 
1494 static int demux_c15_set(u64 id, void __user *uaddr)
1495 {
1496 	u32 val, newval;
1497 	u32 __user *uval = uaddr;
1498 
1499 	/* Fail if we have unknown bits set. */
1500 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
1501 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
1502 		return -ENOENT;
1503 
1504 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
1505 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
1506 		if (KVM_REG_SIZE(id) != 4)
1507 			return -ENOENT;
1508 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
1509 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
1510 		if (!is_valid_cache(val))
1511 			return -ENOENT;
1512 
1513 		if (get_user(newval, uval))
1514 			return -EFAULT;
1515 
1516 		/* This is also invariant: you can't change it. */
1517 		if (newval != get_ccsidr(val))
1518 			return -EINVAL;
1519 		return 0;
1520 	default:
1521 		return -ENOENT;
1522 	}
1523 }
1524 
1525 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1526 {
1527 	const struct sys_reg_desc *r;
1528 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
1529 
1530 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1531 		return demux_c15_get(reg->id, uaddr);
1532 
1533 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
1534 		return -ENOENT;
1535 
1536 	r = index_to_sys_reg_desc(vcpu, reg->id);
1537 	if (!r)
1538 		return get_invariant_sys_reg(reg->id, uaddr);
1539 
1540 	if (r->get_user)
1541 		return (r->get_user)(vcpu, r, reg, uaddr);
1542 
1543 	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
1544 }
1545 
1546 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1547 {
1548 	const struct sys_reg_desc *r;
1549 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
1550 
1551 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1552 		return demux_c15_set(reg->id, uaddr);
1553 
1554 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
1555 		return -ENOENT;
1556 
1557 	r = index_to_sys_reg_desc(vcpu, reg->id);
1558 	if (!r)
1559 		return set_invariant_sys_reg(reg->id, uaddr);
1560 
1561 	if (r->set_user)
1562 		return (r->set_user)(vcpu, r, reg, uaddr);
1563 
1564 	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
1565 }
1566 
1567 static unsigned int num_demux_regs(void)
1568 {
1569 	unsigned int i, count = 0;
1570 
1571 	for (i = 0; i < CSSELR_MAX; i++)
1572 		if (is_valid_cache(i))
1573 			count++;
1574 
1575 	return count;
1576 }
1577 
1578 static int write_demux_regids(u64 __user *uindices)
1579 {
1580 	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
1581 	unsigned int i;
1582 
1583 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
1584 	for (i = 0; i < CSSELR_MAX; i++) {
1585 		if (!is_valid_cache(i))
1586 			continue;
1587 		if (put_user(val | i, uindices))
1588 			return -EFAULT;
1589 		uindices++;
1590 	}
1591 	return 0;
1592 }
1593 
1594 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
1595 {
1596 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
1597 		KVM_REG_ARM64_SYSREG |
1598 		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
1599 		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
1600 		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
1601 		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
1602 		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
1603 }
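
/*
 * Example: for the MPIDR_EL1 descriptor (Op0 = 3, Op1 = 0, CRn = 0,
 * CRm = 0, Op2 = 5) this produces
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG | 0xc005
 * (assuming the current KVM_REG_ARM64_SYSREG_*_SHIFT definitions), i.e.
 * the id userspace passes to KVM_GET/SET_ONE_REG; index_to_params()
 * performs the inverse decomposition.
 */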
1604 
1605 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
1606 {
1607 	if (!*uind)
1608 		return true;
1609 
1610 	if (put_user(sys_reg_to_index(reg), *uind))
1611 		return false;
1612 
1613 	(*uind)++;
1614 	return true;
1615 }
1616 
1617 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
1618 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
1619 {
1620 	const struct sys_reg_desc *i1, *i2, *end1, *end2;
1621 	unsigned int total = 0;
1622 	size_t num;
1623 
1624 	/* We check for duplicates here, to allow arch-specific overrides. */
1625 	i1 = get_target_table(vcpu->arch.target, true, &num);
1626 	end1 = i1 + num;
1627 	i2 = sys_reg_descs;
1628 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
1629 
1630 	BUG_ON(i1 == end1 || i2 == end2);
1631 
1632 	/* Walk carefully, as both tables may refer to the same register. */
1633 	while (i1 || i2) {
1634 		int cmp = cmp_sys_reg(i1, i2);
1635 		/* target-specific overrides generic entry. */
1636 		if (cmp <= 0) {
1637 			/* Ignore registers we trap but don't save. */
1638 			if (i1->reg) {
1639 				if (!copy_reg_to_user(i1, &uind))
1640 					return -EFAULT;
1641 				total++;
1642 			}
1643 		} else {
1644 			/* Ignore registers we trap but don't save. */
1645 			if (i2->reg) {
1646 				if (!copy_reg_to_user(i2, &uind))
1647 					return -EFAULT;
1648 				total++;
1649 			}
1650 		}
1651 
1652 		if (cmp <= 0 && ++i1 == end1)
1653 			i1 = NULL;
1654 		if (cmp >= 0 && ++i2 == end2)
1655 			i2 = NULL;
1656 	}
1657 	return total;
1658 }
1659 
1660 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
1661 {
1662 	return ARRAY_SIZE(invariant_sys_regs)
1663 		+ num_demux_regs()
1664 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
1665 }
1666 
1667 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
1668 {
1669 	unsigned int i;
1670 	int err;
1671 
1672 	/* First give them all the invariant registers' indices. */
1673 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
1674 		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
1675 			return -EFAULT;
1676 		uindices++;
1677 	}
1678 
1679 	err = walk_sys_regs(vcpu, uindices);
1680 	if (err < 0)
1681 		return err;
1682 	uindices += err;
1683 
1684 	return write_demux_regids(uindices);
1685 }
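
/*
 * For reference, userspace consumes these indices roughly as in the
 * sketch below (illustrative only, error handling elided, vcpu_fd is an
 * assumed VCPU file descriptor): a first KVM_GET_REG_LIST call sizes the
 * list, a second one fills it, and each id can then be read with
 * KVM_GET_ONE_REG.
 *
 *	struct kvm_reg_list hdr = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &hdr);	// -E2BIG, hdr.n = count
 *	list = calloc(1, sizeof(*list) + hdr.n * sizeof(__u64));
 *	list->n = hdr.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);	// fills list->reg[]
 */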
1686 
1687 static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
1688 {
1689 	unsigned int i;
1690 
1691 	for (i = 1; i < n; i++) {
1692 		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
1693 			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
1694 			return 1;
1695 		}
1696 	}
1697 
1698 	return 0;
1699 }
1700 
1701 void kvm_sys_reg_table_init(void)
1702 {
1703 	unsigned int i;
1704 	struct sys_reg_desc clidr;
1705 
1706 	/* Make sure tables are unique and in order. */
1707 	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
1708 	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
1709 	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
1710 	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
1711 	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
1712 	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
1713 
1714 	/* We abuse the reset function to overwrite the table itself. */
1715 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
1716 		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
1717 
1718 	/*
1719 	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
1720 	 *
1721 	 *   If software reads the Cache Type fields from Ctype1
1722 	 *   upwards, once it has seen a value of 0b000, no caches
1723 	 *   exist at further-out levels of the hierarchy. So, for
1724 	 *   example, if Ctype3 is the first Cache Type field with a
1725 	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
1726 	 *   ignored.
1727 	 */
1728 	get_clidr_el1(NULL, &clidr); /* Ugly... */
1729 	cache_levels = clidr.val;
1730 	for (i = 0; i < 7; i++)
1731 		if (((cache_levels >> (i*3)) & 7) == 0)
1732 			break;
1733 	/* Clear all higher bits. */
1734 	cache_levels &= (1 << (i*3))-1;
1735 }
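
/*
 * Example of the truncation above: if CLIDR_EL1 reads as 0x0a200023
 * (Ctype1 = 0b011, Ctype2 = 0b100, Ctype3 = 0b000), the loop stops at
 * i = 2 and cache_levels is masked down to 0x23, so only the two levels
 * that actually exist are visible to is_valid_cache().
 */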
1736 
1737 /**
1738  * kvm_reset_sys_regs - sets system registers to reset value
1739  * @vcpu: The VCPU pointer
1740  *
1741  * This function finds the right table above and sets the registers on the
1742  * virtual CPU struct to their architecturally defined reset values.
1743  */
1744 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
1745 {
1746 	size_t num;
1747 	const struct sys_reg_desc *table;
1748 
1749 	/* Catch someone adding a register without putting in a reset entry. */
1750 	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
1751 
1752 	/* Generic chip reset first (so target could override). */
1753 	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
1754 
1755 	table = get_target_table(vcpu->arch.target, true, &num);
1756 	reset_sys_reg_descs(vcpu, table, num);
1757 
1758 	for (num = 1; num < NR_SYS_REGS; num++)
1759 		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
1760 			panic("Didn't reset vcpu_sys_reg(%zi)", num);
1761 }
1762