/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 struct sys_reg_params *params,
                                 const struct sys_reg_desc *r)
{
        WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
                               struct sys_reg_params *params,
                               const struct sys_reg_desc *r)
{
        WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}

u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg)
{
        if (!vcpu->arch.sysregs_loaded_on_cpu)
                goto immediate_read;

        /*
         * System registers listed in the switch are not saved on every
         * exit from the guest but are only saved on vcpu_put.
         *
         * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
         * should never be listed below, because the guest cannot modify its
         * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
         * thread when emulating cross-VCPU communication.
         */
        switch (reg) {
        case CSSELR_EL1:        return read_sysreg_s(SYS_CSSELR_EL1);
        case SCTLR_EL1:         return read_sysreg_s(sctlr_EL12);
        case ACTLR_EL1:         return read_sysreg_s(SYS_ACTLR_EL1);
        case CPACR_EL1:         return read_sysreg_s(cpacr_EL12);
        case TTBR0_EL1:         return read_sysreg_s(ttbr0_EL12);
        case TTBR1_EL1:         return read_sysreg_s(ttbr1_EL12);
        case TCR_EL1:           return read_sysreg_s(tcr_EL12);
        case ESR_EL1:           return read_sysreg_s(esr_EL12);
        case AFSR0_EL1:         return read_sysreg_s(afsr0_EL12);
        case AFSR1_EL1:         return read_sysreg_s(afsr1_EL12);
        case FAR_EL1:           return read_sysreg_s(far_EL12);
        case MAIR_EL1:          return read_sysreg_s(mair_EL12);
        case VBAR_EL1:          return read_sysreg_s(vbar_EL12);
        case CONTEXTIDR_EL1:    return read_sysreg_s(contextidr_EL12);
        case TPIDR_EL0:         return read_sysreg_s(SYS_TPIDR_EL0);
        case TPIDRRO_EL0:       return read_sysreg_s(SYS_TPIDRRO_EL0);
        case TPIDR_EL1:         return read_sysreg_s(SYS_TPIDR_EL1);
        case AMAIR_EL1:         return read_sysreg_s(amair_EL12);
        case CNTKCTL_EL1:       return read_sysreg_s(cntkctl_EL12);
        case PAR_EL1:           return read_sysreg_s(SYS_PAR_EL1);
        case DACR32_EL2:        return read_sysreg_s(SYS_DACR32_EL2);
        case IFSR32_EL2:        return read_sysreg_s(SYS_IFSR32_EL2);
        case DBGVCR32_EL2:      return read_sysreg_s(SYS_DBGVCR32_EL2);
        }

immediate_read:
        return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
        if (!vcpu->arch.sysregs_loaded_on_cpu)
                goto immediate_write;

        /*
         * System registers listed in the switch are not restored on every
         * entry to the guest but are only restored on vcpu_load.
         *
         * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
         * should never be listed below, because the MPIDR should only be
         * set once, before running the VCPU, and never changed later.
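         *
         * Note (added for clarity): accessors that may touch one of the
         * registers listed here, such as access_vm_reg() or
         * trap_debug_regs() below, go through vcpu_read_sys_reg() and
         * vcpu_write_sys_reg() rather than __vcpu_sys_reg() directly, so
         * that the hardware register is used whenever
         * sysregs_loaded_on_cpu is set and the in-memory copy otherwise.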
         */
        switch (reg) {
        case CSSELR_EL1:        write_sysreg_s(val, SYS_CSSELR_EL1);    return;
        case SCTLR_EL1:         write_sysreg_s(val, sctlr_EL12);        return;
        case ACTLR_EL1:         write_sysreg_s(val, SYS_ACTLR_EL1);     return;
        case CPACR_EL1:         write_sysreg_s(val, cpacr_EL12);        return;
        case TTBR0_EL1:         write_sysreg_s(val, ttbr0_EL12);        return;
        case TTBR1_EL1:         write_sysreg_s(val, ttbr1_EL12);        return;
        case TCR_EL1:           write_sysreg_s(val, tcr_EL12);          return;
        case ESR_EL1:           write_sysreg_s(val, esr_EL12);          return;
        case AFSR0_EL1:         write_sysreg_s(val, afsr0_EL12);        return;
        case AFSR1_EL1:         write_sysreg_s(val, afsr1_EL12);        return;
        case FAR_EL1:           write_sysreg_s(val, far_EL12);          return;
        case MAIR_EL1:          write_sysreg_s(val, mair_EL12);         return;
        case VBAR_EL1:          write_sysreg_s(val, vbar_EL12);         return;
        case CONTEXTIDR_EL1:    write_sysreg_s(val, contextidr_EL12);   return;
        case TPIDR_EL0:         write_sysreg_s(val, SYS_TPIDR_EL0);     return;
        case TPIDRRO_EL0:       write_sysreg_s(val, SYS_TPIDRRO_EL0);   return;
        case TPIDR_EL1:         write_sysreg_s(val, SYS_TPIDR_EL1);     return;
        case AMAIR_EL1:         write_sysreg_s(val, amair_EL12);        return;
        case CNTKCTL_EL1:       write_sysreg_s(val, cntkctl_EL12);      return;
        case PAR_EL1:           write_sysreg_s(val, SYS_PAR_EL1);       return;
        case DACR32_EL2:        write_sysreg_s(val, SYS_DACR32_EL2);    return;
        case IFSR32_EL2:        write_sysreg_s(val, SYS_IFSR32_EL2);    return;
        case DBGVCR32_EL2:      write_sysreg_s(val, SYS_DBGVCR32_EL2);  return;
        }

immediate_write:
        __vcpu_sys_reg(vcpu, reg) = val;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure no one else changes CSSELR during this! */
        local_irq_disable();
        write_sysreg(csselr, csselr_el1);
        isb();
        ccsidr = read_sysreg(ccsidr_el1);
        local_irq_enable();

        return ccsidr;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (!p->is_write)
                return read_from_write_only(vcpu, p, r);

        kvm_set_way_flush(vcpu);
        return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
                          struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        bool was_enabled = vcpu_has_cache_enabled(vcpu);
        u64 val;
        int reg = r->reg;

        BUG_ON(!p->is_write);

        /* See the 32bit mapping in kvm_host.h */
        if (p->is_aarch32)
                reg = r->reg / 2;

        if (!p->is_aarch32 || !p->is_32bit) {
                val = p->regval;
        } else {
                val = vcpu_read_sys_reg(vcpu, reg);
                if (r->reg % 2)
                        val = (p->regval << 32) | (u64)lower_32_bits(val);
                else
                        val = ((u64)upper_32_bits(val) << 32) |
                                lower_32_bits(p->regval);
        }
        vcpu_write_sys_reg(vcpu, val, reg);

        kvm_toggle_cache(vcpu, was_enabled);
        return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (!p->is_write)
                return read_from_write_only(vcpu, p, r);

        vgic_v3_dispatch_sgi(vcpu, p->regval);

        return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
        return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
                       struct sys_reg_params *p,
                       const struct sys_reg_desc *r)
{
        kvm_inject_undefined(vcpu);
        return false;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                p->regval = (1 << 3);
                return true;
        }
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
                                   struct sys_reg_params *p,
                                   const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                p->regval = read_sysreg(dbgauthstatus_el1);
                return true;
        }
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance.
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating that the guest has modified
 * the debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps.
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
                            struct sys_reg_params *p,
                            const struct sys_reg_desc *r)
{
        if (p->is_write) {
                vcpu_write_sys_reg(vcpu, p->regval, r->reg);
                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
        } else {
                p->regval = vcpu_read_sys_reg(vcpu, r->reg);
        }

        trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

        return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone.
 * A 32 bit read from a debug register only returns the bottom bits.
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in the future.
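 *
 * For example (illustrative values only): a 32 bit guest write of 0x1234
 * to a breakpoint value register currently holding 0xffff0000_89abcdef
 * results in 0xffff0000_00001234 -- only the bottom 32 bits are replaced.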
359 */ 360 static void reg_to_dbg(struct kvm_vcpu *vcpu, 361 struct sys_reg_params *p, 362 u64 *dbg_reg) 363 { 364 u64 val = p->regval; 365 366 if (p->is_32bit) { 367 val &= 0xffffffffUL; 368 val |= ((*dbg_reg >> 32) << 32); 369 } 370 371 *dbg_reg = val; 372 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 373 } 374 375 static void dbg_to_reg(struct kvm_vcpu *vcpu, 376 struct sys_reg_params *p, 377 u64 *dbg_reg) 378 { 379 p->regval = *dbg_reg; 380 if (p->is_32bit) 381 p->regval &= 0xffffffffUL; 382 } 383 384 static bool trap_bvr(struct kvm_vcpu *vcpu, 385 struct sys_reg_params *p, 386 const struct sys_reg_desc *rd) 387 { 388 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; 389 390 if (p->is_write) 391 reg_to_dbg(vcpu, p, dbg_reg); 392 else 393 dbg_to_reg(vcpu, p, dbg_reg); 394 395 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); 396 397 return true; 398 } 399 400 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 401 const struct kvm_one_reg *reg, void __user *uaddr) 402 { 403 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; 404 405 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) 406 return -EFAULT; 407 return 0; 408 } 409 410 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 411 const struct kvm_one_reg *reg, void __user *uaddr) 412 { 413 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; 414 415 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) 416 return -EFAULT; 417 return 0; 418 } 419 420 static void reset_bvr(struct kvm_vcpu *vcpu, 421 const struct sys_reg_desc *rd) 422 { 423 vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val; 424 } 425 426 static bool trap_bcr(struct kvm_vcpu *vcpu, 427 struct sys_reg_params *p, 428 const struct sys_reg_desc *rd) 429 { 430 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; 431 432 if (p->is_write) 433 reg_to_dbg(vcpu, p, dbg_reg); 434 else 435 dbg_to_reg(vcpu, p, dbg_reg); 436 437 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); 438 439 return true; 440 } 441 442 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 443 const struct kvm_one_reg *reg, void __user *uaddr) 444 { 445 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; 446 447 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) 448 return -EFAULT; 449 450 return 0; 451 } 452 453 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 454 const struct kvm_one_reg *reg, void __user *uaddr) 455 { 456 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; 457 458 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) 459 return -EFAULT; 460 return 0; 461 } 462 463 static void reset_bcr(struct kvm_vcpu *vcpu, 464 const struct sys_reg_desc *rd) 465 { 466 vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val; 467 } 468 469 static bool trap_wvr(struct kvm_vcpu *vcpu, 470 struct sys_reg_params *p, 471 const struct sys_reg_desc *rd) 472 { 473 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; 474 475 if (p->is_write) 476 reg_to_dbg(vcpu, p, dbg_reg); 477 else 478 dbg_to_reg(vcpu, p, dbg_reg); 479 480 trace_trap_reg(__func__, rd->reg, p->is_write, 481 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]); 482 483 return true; 484 } 485 486 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 487 const struct kvm_one_reg *reg, void __user *uaddr) 488 { 489 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; 490 491 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) 492 return -EFAULT; 493 
return 0; 494 } 495 496 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 497 const struct kvm_one_reg *reg, void __user *uaddr) 498 { 499 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; 500 501 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) 502 return -EFAULT; 503 return 0; 504 } 505 506 static void reset_wvr(struct kvm_vcpu *vcpu, 507 const struct sys_reg_desc *rd) 508 { 509 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val; 510 } 511 512 static bool trap_wcr(struct kvm_vcpu *vcpu, 513 struct sys_reg_params *p, 514 const struct sys_reg_desc *rd) 515 { 516 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; 517 518 if (p->is_write) 519 reg_to_dbg(vcpu, p, dbg_reg); 520 else 521 dbg_to_reg(vcpu, p, dbg_reg); 522 523 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); 524 525 return true; 526 } 527 528 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 529 const struct kvm_one_reg *reg, void __user *uaddr) 530 { 531 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; 532 533 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) 534 return -EFAULT; 535 return 0; 536 } 537 538 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 539 const struct kvm_one_reg *reg, void __user *uaddr) 540 { 541 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; 542 543 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) 544 return -EFAULT; 545 return 0; 546 } 547 548 static void reset_wcr(struct kvm_vcpu *vcpu, 549 const struct sys_reg_desc *rd) 550 { 551 vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val; 552 } 553 554 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 555 { 556 u64 amair = read_sysreg(amair_el1); 557 vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1); 558 } 559 560 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 561 { 562 u64 mpidr; 563 564 /* 565 * Map the vcpu_id into the first three affinity level fields of 566 * the MPIDR. We limit the number of VCPUs in level 0 due to a 567 * limitation to 16 CPUs in that level in the ICC_SGIxR registers 568 * of the GICv3 to be able to address each CPU directly when 569 * sending IPIs. 570 */ 571 mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); 572 mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); 573 mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); 574 vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1); 575 } 576 577 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 578 { 579 u64 pmcr, val; 580 581 pmcr = read_sysreg(pmcr_el0); 582 /* 583 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN 584 * except PMCR.E resetting to zero. 
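         *
         * (Note, added for clarity: the expression below keeps the
         * read-only bits read from the hardware PMCR_EL0, fills the
         * writable bits with an arbitrary 0xdecafbad pattern, and then
         * clears PMCR_EL0.E.)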
585 */ 586 val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) 587 | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); 588 __vcpu_sys_reg(vcpu, PMCR_EL0) = val; 589 } 590 591 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags) 592 { 593 u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0); 594 bool enabled = (reg & flags) || vcpu_mode_priv(vcpu); 595 596 if (!enabled) 597 kvm_inject_undefined(vcpu); 598 599 return !enabled; 600 } 601 602 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu) 603 { 604 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN); 605 } 606 607 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu) 608 { 609 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN); 610 } 611 612 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu) 613 { 614 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN); 615 } 616 617 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu) 618 { 619 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN); 620 } 621 622 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 623 const struct sys_reg_desc *r) 624 { 625 u64 val; 626 627 if (!kvm_arm_pmu_v3_ready(vcpu)) 628 return trap_raz_wi(vcpu, p, r); 629 630 if (pmu_access_el0_disabled(vcpu)) 631 return false; 632 633 if (p->is_write) { 634 /* Only update writeable bits of PMCR */ 635 val = __vcpu_sys_reg(vcpu, PMCR_EL0); 636 val &= ~ARMV8_PMU_PMCR_MASK; 637 val |= p->regval & ARMV8_PMU_PMCR_MASK; 638 __vcpu_sys_reg(vcpu, PMCR_EL0) = val; 639 kvm_pmu_handle_pmcr(vcpu, val); 640 } else { 641 /* PMCR.P & PMCR.C are RAZ */ 642 val = __vcpu_sys_reg(vcpu, PMCR_EL0) 643 & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C); 644 p->regval = val; 645 } 646 647 return true; 648 } 649 650 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 651 const struct sys_reg_desc *r) 652 { 653 if (!kvm_arm_pmu_v3_ready(vcpu)) 654 return trap_raz_wi(vcpu, p, r); 655 656 if (pmu_access_event_counter_el0_disabled(vcpu)) 657 return false; 658 659 if (p->is_write) 660 __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; 661 else 662 /* return PMSELR.SEL field */ 663 p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0) 664 & ARMV8_PMU_COUNTER_MASK; 665 666 return true; 667 } 668 669 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 670 const struct sys_reg_desc *r) 671 { 672 u64 pmceid; 673 674 if (!kvm_arm_pmu_v3_ready(vcpu)) 675 return trap_raz_wi(vcpu, p, r); 676 677 BUG_ON(p->is_write); 678 679 if (pmu_access_el0_disabled(vcpu)) 680 return false; 681 682 if (!(p->Op2 & 1)) 683 pmceid = read_sysreg(pmceid0_el0); 684 else 685 pmceid = read_sysreg(pmceid1_el0); 686 687 p->regval = pmceid; 688 689 return true; 690 } 691 692 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) 693 { 694 u64 pmcr, val; 695 696 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0); 697 val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; 698 if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) { 699 kvm_inject_undefined(vcpu); 700 return false; 701 } 702 703 return true; 704 } 705 706 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, 707 struct sys_reg_params *p, 708 const struct sys_reg_desc *r) 709 { 710 u64 idx; 711 712 if (!kvm_arm_pmu_v3_ready(vcpu)) 713 return trap_raz_wi(vcpu, p, r); 714 715 if (r->CRn == 9 && r->CRm == 13) { 716 if (r->Op2 == 2) { 717 /* PMXEVCNTR_EL0 */ 718 if (pmu_access_event_counter_el0_disabled(vcpu)) 719 return false; 
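                        /*
                         * PMSELR_EL0.SEL picks the event counter that the
                         * PMXEVCNTR_EL0 alias accesses.
                         */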
720 721 idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) 722 & ARMV8_PMU_COUNTER_MASK; 723 } else if (r->Op2 == 0) { 724 /* PMCCNTR_EL0 */ 725 if (pmu_access_cycle_counter_el0_disabled(vcpu)) 726 return false; 727 728 idx = ARMV8_PMU_CYCLE_IDX; 729 } else { 730 return false; 731 } 732 } else if (r->CRn == 0 && r->CRm == 9) { 733 /* PMCCNTR */ 734 if (pmu_access_event_counter_el0_disabled(vcpu)) 735 return false; 736 737 idx = ARMV8_PMU_CYCLE_IDX; 738 } else if (r->CRn == 14 && (r->CRm & 12) == 8) { 739 /* PMEVCNTRn_EL0 */ 740 if (pmu_access_event_counter_el0_disabled(vcpu)) 741 return false; 742 743 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 744 } else { 745 return false; 746 } 747 748 if (!pmu_counter_idx_valid(vcpu, idx)) 749 return false; 750 751 if (p->is_write) { 752 if (pmu_access_el0_disabled(vcpu)) 753 return false; 754 755 kvm_pmu_set_counter_value(vcpu, idx, p->regval); 756 } else { 757 p->regval = kvm_pmu_get_counter_value(vcpu, idx); 758 } 759 760 return true; 761 } 762 763 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 764 const struct sys_reg_desc *r) 765 { 766 u64 idx, reg; 767 768 if (!kvm_arm_pmu_v3_ready(vcpu)) 769 return trap_raz_wi(vcpu, p, r); 770 771 if (pmu_access_el0_disabled(vcpu)) 772 return false; 773 774 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { 775 /* PMXEVTYPER_EL0 */ 776 idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; 777 reg = PMEVTYPER0_EL0 + idx; 778 } else if (r->CRn == 14 && (r->CRm & 12) == 12) { 779 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 780 if (idx == ARMV8_PMU_CYCLE_IDX) 781 reg = PMCCFILTR_EL0; 782 else 783 /* PMEVTYPERn_EL0 */ 784 reg = PMEVTYPER0_EL0 + idx; 785 } else { 786 BUG(); 787 } 788 789 if (!pmu_counter_idx_valid(vcpu, idx)) 790 return false; 791 792 if (p->is_write) { 793 kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); 794 __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; 795 } else { 796 p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; 797 } 798 799 return true; 800 } 801 802 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 803 const struct sys_reg_desc *r) 804 { 805 u64 val, mask; 806 807 if (!kvm_arm_pmu_v3_ready(vcpu)) 808 return trap_raz_wi(vcpu, p, r); 809 810 if (pmu_access_el0_disabled(vcpu)) 811 return false; 812 813 mask = kvm_pmu_valid_counter_mask(vcpu); 814 if (p->is_write) { 815 val = p->regval & mask; 816 if (r->Op2 & 0x1) { 817 /* accessing PMCNTENSET_EL0 */ 818 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; 819 kvm_pmu_enable_counter(vcpu, val); 820 } else { 821 /* accessing PMCNTENCLR_EL0 */ 822 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; 823 kvm_pmu_disable_counter(vcpu, val); 824 } 825 } else { 826 p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask; 827 } 828 829 return true; 830 } 831 832 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 833 const struct sys_reg_desc *r) 834 { 835 u64 mask = kvm_pmu_valid_counter_mask(vcpu); 836 837 if (!kvm_arm_pmu_v3_ready(vcpu)) 838 return trap_raz_wi(vcpu, p, r); 839 840 if (!vcpu_mode_priv(vcpu)) { 841 kvm_inject_undefined(vcpu); 842 return false; 843 } 844 845 if (p->is_write) { 846 u64 val = p->regval & mask; 847 848 if (r->Op2 & 0x1) 849 /* accessing PMINTENSET_EL1 */ 850 __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val; 851 else 852 /* accessing PMINTENCLR_EL1 */ 853 __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val; 854 } else { 855 p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask; 856 } 857 858 return true; 859 } 860 861 static bool 
access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 862 const struct sys_reg_desc *r) 863 { 864 u64 mask = kvm_pmu_valid_counter_mask(vcpu); 865 866 if (!kvm_arm_pmu_v3_ready(vcpu)) 867 return trap_raz_wi(vcpu, p, r); 868 869 if (pmu_access_el0_disabled(vcpu)) 870 return false; 871 872 if (p->is_write) { 873 if (r->CRm & 0x2) 874 /* accessing PMOVSSET_EL0 */ 875 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask); 876 else 877 /* accessing PMOVSCLR_EL0 */ 878 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); 879 } else { 880 p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask; 881 } 882 883 return true; 884 } 885 886 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 887 const struct sys_reg_desc *r) 888 { 889 u64 mask; 890 891 if (!kvm_arm_pmu_v3_ready(vcpu)) 892 return trap_raz_wi(vcpu, p, r); 893 894 if (!p->is_write) 895 return read_from_write_only(vcpu, p, r); 896 897 if (pmu_write_swinc_el0_disabled(vcpu)) 898 return false; 899 900 mask = kvm_pmu_valid_counter_mask(vcpu); 901 kvm_pmu_software_increment(vcpu, p->regval & mask); 902 return true; 903 } 904 905 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 906 const struct sys_reg_desc *r) 907 { 908 if (!kvm_arm_pmu_v3_ready(vcpu)) 909 return trap_raz_wi(vcpu, p, r); 910 911 if (p->is_write) { 912 if (!vcpu_mode_priv(vcpu)) { 913 kvm_inject_undefined(vcpu); 914 return false; 915 } 916 917 __vcpu_sys_reg(vcpu, PMUSERENR_EL0) = 918 p->regval & ARMV8_PMU_USERENR_MASK; 919 } else { 920 p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0) 921 & ARMV8_PMU_USERENR_MASK; 922 } 923 924 return true; 925 } 926 927 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ 928 #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ 929 { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ 930 trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \ 931 { SYS_DESC(SYS_DBGBCRn_EL1(n)), \ 932 trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \ 933 { SYS_DESC(SYS_DBGWVRn_EL1(n)), \ 934 trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \ 935 { SYS_DESC(SYS_DBGWCRn_EL1(n)), \ 936 trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr } 937 938 /* Macro to expand the PMEVCNTRn_EL0 register */ 939 #define PMU_PMEVCNTR_EL0(n) \ 940 { SYS_DESC(SYS_PMEVCNTRn_EL0(n)), \ 941 access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), } 942 943 /* Macro to expand the PMEVTYPERn_EL0 register */ 944 #define PMU_PMEVTYPER_EL0(n) \ 945 { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \ 946 access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } 947 948 static bool access_cntp_tval(struct kvm_vcpu *vcpu, 949 struct sys_reg_params *p, 950 const struct sys_reg_desc *r) 951 { 952 u64 now = kvm_phys_timer_read(); 953 u64 cval; 954 955 if (p->is_write) { 956 kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, 957 p->regval + now); 958 } else { 959 cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL); 960 p->regval = cval - now; 961 } 962 963 return true; 964 } 965 966 static bool access_cntp_ctl(struct kvm_vcpu *vcpu, 967 struct sys_reg_params *p, 968 const struct sys_reg_desc *r) 969 { 970 if (p->is_write) 971 kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval); 972 else 973 p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL); 974 975 return true; 976 } 977 978 static bool access_cntp_cval(struct kvm_vcpu *vcpu, 979 struct sys_reg_params *p, 980 const struct sys_reg_desc *r) 981 { 982 if (p->is_write) 983 kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval); 984 else 985 p->regval = kvm_arm_timer_get_reg(vcpu, 
KVM_REG_ARM_PTIMER_CVAL); 986 987 return true; 988 } 989 990 /* Read a sanitised cpufeature ID register by sys_reg_desc */ 991 static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) 992 { 993 u32 id = sys_reg((u32)r->Op0, (u32)r->Op1, 994 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); 995 u64 val = raz ? 0 : read_sanitised_ftr_reg(id); 996 997 if (id == SYS_ID_AA64PFR0_EL1) { 998 if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) 999 kvm_debug("SVE unsupported for guests, suppressing\n"); 1000 1001 val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); 1002 } else if (id == SYS_ID_AA64MMFR1_EL1) { 1003 if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) 1004 kvm_debug("LORegions unsupported for guests, suppressing\n"); 1005 1006 val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); 1007 } 1008 1009 return val; 1010 } 1011 1012 /* cpufeature ID register access trap handlers */ 1013 1014 static bool __access_id_reg(struct kvm_vcpu *vcpu, 1015 struct sys_reg_params *p, 1016 const struct sys_reg_desc *r, 1017 bool raz) 1018 { 1019 if (p->is_write) 1020 return write_to_read_only(vcpu, p, r); 1021 1022 p->regval = read_id_reg(r, raz); 1023 return true; 1024 } 1025 1026 static bool access_id_reg(struct kvm_vcpu *vcpu, 1027 struct sys_reg_params *p, 1028 const struct sys_reg_desc *r) 1029 { 1030 return __access_id_reg(vcpu, p, r, false); 1031 } 1032 1033 static bool access_raz_id_reg(struct kvm_vcpu *vcpu, 1034 struct sys_reg_params *p, 1035 const struct sys_reg_desc *r) 1036 { 1037 return __access_id_reg(vcpu, p, r, true); 1038 } 1039 1040 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id); 1041 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id); 1042 static u64 sys_reg_to_index(const struct sys_reg_desc *reg); 1043 1044 /* 1045 * cpufeature ID register user accessors 1046 * 1047 * For now, these registers are immutable for userspace, so no values 1048 * are stored, and for set_id_reg() we don't allow the effective value 1049 * to be changed. 1050 */ 1051 static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, 1052 bool raz) 1053 { 1054 const u64 id = sys_reg_to_index(rd); 1055 const u64 val = read_id_reg(rd, raz); 1056 1057 return reg_to_user(uaddr, &val, id); 1058 } 1059 1060 static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, 1061 bool raz) 1062 { 1063 const u64 id = sys_reg_to_index(rd); 1064 int err; 1065 u64 val; 1066 1067 err = reg_from_user(&val, uaddr, id); 1068 if (err) 1069 return err; 1070 1071 /* This is what we mean by invariant: you can't change it. 
*/ 1072 if (val != read_id_reg(rd, raz)) 1073 return -EINVAL; 1074 1075 return 0; 1076 } 1077 1078 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 1079 const struct kvm_one_reg *reg, void __user *uaddr) 1080 { 1081 return __get_id_reg(rd, uaddr, false); 1082 } 1083 1084 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 1085 const struct kvm_one_reg *reg, void __user *uaddr) 1086 { 1087 return __set_id_reg(rd, uaddr, false); 1088 } 1089 1090 static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 1091 const struct kvm_one_reg *reg, void __user *uaddr) 1092 { 1093 return __get_id_reg(rd, uaddr, true); 1094 } 1095 1096 static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 1097 const struct kvm_one_reg *reg, void __user *uaddr) 1098 { 1099 return __set_id_reg(rd, uaddr, true); 1100 } 1101 1102 /* sys_reg_desc initialiser for known cpufeature ID registers */ 1103 #define ID_SANITISED(name) { \ 1104 SYS_DESC(SYS_##name), \ 1105 .access = access_id_reg, \ 1106 .get_user = get_id_reg, \ 1107 .set_user = set_id_reg, \ 1108 } 1109 1110 /* 1111 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID 1112 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2 1113 * (1 <= crm < 8, 0 <= Op2 < 8). 1114 */ 1115 #define ID_UNALLOCATED(crm, op2) { \ 1116 Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \ 1117 .access = access_raz_id_reg, \ 1118 .get_user = get_raz_id_reg, \ 1119 .set_user = set_raz_id_reg, \ 1120 } 1121 1122 /* 1123 * sys_reg_desc initialiser for known ID registers that we hide from guests. 1124 * For now, these are exposed just like unallocated ID regs: they appear 1125 * RAZ for the guest. 1126 */ 1127 #define ID_HIDDEN(name) { \ 1128 SYS_DESC(SYS_##name), \ 1129 .access = access_raz_id_reg, \ 1130 .get_user = get_raz_id_reg, \ 1131 .set_user = set_raz_id_reg, \ 1132 } 1133 1134 /* 1135 * Architected system registers. 1136 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 1137 * 1138 * Debug handling: We do trap most, if not all debug related system 1139 * registers. The implementation is good enough to ensure that a guest 1140 * can use these with minimal performance degradation. The drawback is 1141 * that we don't implement any of the external debug, none of the 1142 * OSlock protocol. This should be revisited if we ever encounter a 1143 * more demanding guest... 
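 *
 * (Note, added for clarity: this table is binary-searched by find_reg()
 * on the packed Op0/Op1/CRn/CRm/Op2 value -- see reg_to_match_value()
 * below -- which is why the ordering requirement above matters.)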
1144 */ 1145 static const struct sys_reg_desc sys_reg_descs[] = { 1146 { SYS_DESC(SYS_DC_ISW), access_dcsw }, 1147 { SYS_DESC(SYS_DC_CSW), access_dcsw }, 1148 { SYS_DESC(SYS_DC_CISW), access_dcsw }, 1149 1150 DBG_BCR_BVR_WCR_WVR_EL1(0), 1151 DBG_BCR_BVR_WCR_WVR_EL1(1), 1152 { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, 1153 { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 }, 1154 DBG_BCR_BVR_WCR_WVR_EL1(2), 1155 DBG_BCR_BVR_WCR_WVR_EL1(3), 1156 DBG_BCR_BVR_WCR_WVR_EL1(4), 1157 DBG_BCR_BVR_WCR_WVR_EL1(5), 1158 DBG_BCR_BVR_WCR_WVR_EL1(6), 1159 DBG_BCR_BVR_WCR_WVR_EL1(7), 1160 DBG_BCR_BVR_WCR_WVR_EL1(8), 1161 DBG_BCR_BVR_WCR_WVR_EL1(9), 1162 DBG_BCR_BVR_WCR_WVR_EL1(10), 1163 DBG_BCR_BVR_WCR_WVR_EL1(11), 1164 DBG_BCR_BVR_WCR_WVR_EL1(12), 1165 DBG_BCR_BVR_WCR_WVR_EL1(13), 1166 DBG_BCR_BVR_WCR_WVR_EL1(14), 1167 DBG_BCR_BVR_WCR_WVR_EL1(15), 1168 1169 { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi }, 1170 { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi }, 1171 { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 }, 1172 { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi }, 1173 { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi }, 1174 { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi }, 1175 { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi }, 1176 { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 }, 1177 1178 { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi }, 1179 { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi }, 1180 // DBGDTR[TR]X_EL0 share the same encoding 1181 { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi }, 1182 1183 { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 }, 1184 1185 { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 }, 1186 1187 /* 1188 * ID regs: all ID_SANITISED() entries here must have corresponding 1189 * entries in arm64_ftr_regs[]. 1190 */ 1191 1192 /* AArch64 mappings of the AArch32 ID registers */ 1193 /* CRm=1 */ 1194 ID_SANITISED(ID_PFR0_EL1), 1195 ID_SANITISED(ID_PFR1_EL1), 1196 ID_SANITISED(ID_DFR0_EL1), 1197 ID_HIDDEN(ID_AFR0_EL1), 1198 ID_SANITISED(ID_MMFR0_EL1), 1199 ID_SANITISED(ID_MMFR1_EL1), 1200 ID_SANITISED(ID_MMFR2_EL1), 1201 ID_SANITISED(ID_MMFR3_EL1), 1202 1203 /* CRm=2 */ 1204 ID_SANITISED(ID_ISAR0_EL1), 1205 ID_SANITISED(ID_ISAR1_EL1), 1206 ID_SANITISED(ID_ISAR2_EL1), 1207 ID_SANITISED(ID_ISAR3_EL1), 1208 ID_SANITISED(ID_ISAR4_EL1), 1209 ID_SANITISED(ID_ISAR5_EL1), 1210 ID_SANITISED(ID_MMFR4_EL1), 1211 ID_UNALLOCATED(2,7), 1212 1213 /* CRm=3 */ 1214 ID_SANITISED(MVFR0_EL1), 1215 ID_SANITISED(MVFR1_EL1), 1216 ID_SANITISED(MVFR2_EL1), 1217 ID_UNALLOCATED(3,3), 1218 ID_UNALLOCATED(3,4), 1219 ID_UNALLOCATED(3,5), 1220 ID_UNALLOCATED(3,6), 1221 ID_UNALLOCATED(3,7), 1222 1223 /* AArch64 ID registers */ 1224 /* CRm=4 */ 1225 ID_SANITISED(ID_AA64PFR0_EL1), 1226 ID_SANITISED(ID_AA64PFR1_EL1), 1227 ID_UNALLOCATED(4,2), 1228 ID_UNALLOCATED(4,3), 1229 ID_UNALLOCATED(4,4), 1230 ID_UNALLOCATED(4,5), 1231 ID_UNALLOCATED(4,6), 1232 ID_UNALLOCATED(4,7), 1233 1234 /* CRm=5 */ 1235 ID_SANITISED(ID_AA64DFR0_EL1), 1236 ID_SANITISED(ID_AA64DFR1_EL1), 1237 ID_UNALLOCATED(5,2), 1238 ID_UNALLOCATED(5,3), 1239 ID_HIDDEN(ID_AA64AFR0_EL1), 1240 ID_HIDDEN(ID_AA64AFR1_EL1), 1241 ID_UNALLOCATED(5,6), 1242 ID_UNALLOCATED(5,7), 1243 1244 /* CRm=6 */ 1245 ID_SANITISED(ID_AA64ISAR0_EL1), 1246 ID_SANITISED(ID_AA64ISAR1_EL1), 1247 ID_UNALLOCATED(6,2), 1248 ID_UNALLOCATED(6,3), 1249 ID_UNALLOCATED(6,4), 1250 ID_UNALLOCATED(6,5), 1251 ID_UNALLOCATED(6,6), 1252 ID_UNALLOCATED(6,7), 1253 1254 /* CRm=7 */ 1255 ID_SANITISED(ID_AA64MMFR0_EL1), 1256 ID_SANITISED(ID_AA64MMFR1_EL1), 1257 
ID_SANITISED(ID_AA64MMFR2_EL1), 1258 ID_UNALLOCATED(7,3), 1259 ID_UNALLOCATED(7,4), 1260 ID_UNALLOCATED(7,5), 1261 ID_UNALLOCATED(7,6), 1262 ID_UNALLOCATED(7,7), 1263 1264 { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, 1265 { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, 1266 { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, 1267 { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, 1268 { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, 1269 1270 { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 }, 1271 { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, 1272 { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, 1273 1274 { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi }, 1275 { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi }, 1276 { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi }, 1277 { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi }, 1278 { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi }, 1279 { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi }, 1280 { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi }, 1281 { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi }, 1282 1283 { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 }, 1284 { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 }, 1285 1286 { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 }, 1287 { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 }, 1288 1289 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, 1290 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, 1291 1292 { SYS_DESC(SYS_LORSA_EL1), trap_undef }, 1293 { SYS_DESC(SYS_LOREA_EL1), trap_undef }, 1294 { SYS_DESC(SYS_LORN_EL1), trap_undef }, 1295 { SYS_DESC(SYS_LORC_EL1), trap_undef }, 1296 { SYS_DESC(SYS_LORID_EL1), trap_undef }, 1297 1298 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, 1299 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, 1300 1301 { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only }, 1302 { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only }, 1303 { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only }, 1304 { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only }, 1305 { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only }, 1306 { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi }, 1307 { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only }, 1308 { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only }, 1309 { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only }, 1310 { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre }, 1311 1312 { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, 1313 { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 }, 1314 1315 { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0}, 1316 1317 { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 }, 1318 1319 { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, }, 1320 { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, 1321 { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 }, 1322 { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 }, 1323 { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 }, 1324 { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 }, 1325 { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid }, 1326 { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid }, 1327 { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 }, 1328 { SYS_DESC(SYS_PMXEVTYPER_EL0), 
access_pmu_evtyper }, 1329 { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr }, 1330 /* 1331 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero 1332 * in 32bit mode. Here we choose to reset it as zero for consistency. 1333 */ 1334 { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 }, 1335 { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 }, 1336 1337 { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, 1338 { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, 1339 1340 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval }, 1341 { SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl }, 1342 { SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval }, 1343 1344 /* PMEVCNTRn_EL0 */ 1345 PMU_PMEVCNTR_EL0(0), 1346 PMU_PMEVCNTR_EL0(1), 1347 PMU_PMEVCNTR_EL0(2), 1348 PMU_PMEVCNTR_EL0(3), 1349 PMU_PMEVCNTR_EL0(4), 1350 PMU_PMEVCNTR_EL0(5), 1351 PMU_PMEVCNTR_EL0(6), 1352 PMU_PMEVCNTR_EL0(7), 1353 PMU_PMEVCNTR_EL0(8), 1354 PMU_PMEVCNTR_EL0(9), 1355 PMU_PMEVCNTR_EL0(10), 1356 PMU_PMEVCNTR_EL0(11), 1357 PMU_PMEVCNTR_EL0(12), 1358 PMU_PMEVCNTR_EL0(13), 1359 PMU_PMEVCNTR_EL0(14), 1360 PMU_PMEVCNTR_EL0(15), 1361 PMU_PMEVCNTR_EL0(16), 1362 PMU_PMEVCNTR_EL0(17), 1363 PMU_PMEVCNTR_EL0(18), 1364 PMU_PMEVCNTR_EL0(19), 1365 PMU_PMEVCNTR_EL0(20), 1366 PMU_PMEVCNTR_EL0(21), 1367 PMU_PMEVCNTR_EL0(22), 1368 PMU_PMEVCNTR_EL0(23), 1369 PMU_PMEVCNTR_EL0(24), 1370 PMU_PMEVCNTR_EL0(25), 1371 PMU_PMEVCNTR_EL0(26), 1372 PMU_PMEVCNTR_EL0(27), 1373 PMU_PMEVCNTR_EL0(28), 1374 PMU_PMEVCNTR_EL0(29), 1375 PMU_PMEVCNTR_EL0(30), 1376 /* PMEVTYPERn_EL0 */ 1377 PMU_PMEVTYPER_EL0(0), 1378 PMU_PMEVTYPER_EL0(1), 1379 PMU_PMEVTYPER_EL0(2), 1380 PMU_PMEVTYPER_EL0(3), 1381 PMU_PMEVTYPER_EL0(4), 1382 PMU_PMEVTYPER_EL0(5), 1383 PMU_PMEVTYPER_EL0(6), 1384 PMU_PMEVTYPER_EL0(7), 1385 PMU_PMEVTYPER_EL0(8), 1386 PMU_PMEVTYPER_EL0(9), 1387 PMU_PMEVTYPER_EL0(10), 1388 PMU_PMEVTYPER_EL0(11), 1389 PMU_PMEVTYPER_EL0(12), 1390 PMU_PMEVTYPER_EL0(13), 1391 PMU_PMEVTYPER_EL0(14), 1392 PMU_PMEVTYPER_EL0(15), 1393 PMU_PMEVTYPER_EL0(16), 1394 PMU_PMEVTYPER_EL0(17), 1395 PMU_PMEVTYPER_EL0(18), 1396 PMU_PMEVTYPER_EL0(19), 1397 PMU_PMEVTYPER_EL0(20), 1398 PMU_PMEVTYPER_EL0(21), 1399 PMU_PMEVTYPER_EL0(22), 1400 PMU_PMEVTYPER_EL0(23), 1401 PMU_PMEVTYPER_EL0(24), 1402 PMU_PMEVTYPER_EL0(25), 1403 PMU_PMEVTYPER_EL0(26), 1404 PMU_PMEVTYPER_EL0(27), 1405 PMU_PMEVTYPER_EL0(28), 1406 PMU_PMEVTYPER_EL0(29), 1407 PMU_PMEVTYPER_EL0(30), 1408 /* 1409 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero 1410 * in 32bit mode. Here we choose to reset it as zero for consistency. 
1411 */ 1412 { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 }, 1413 1414 { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 }, 1415 { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 }, 1416 { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 }, 1417 }; 1418 1419 static bool trap_dbgidr(struct kvm_vcpu *vcpu, 1420 struct sys_reg_params *p, 1421 const struct sys_reg_desc *r) 1422 { 1423 if (p->is_write) { 1424 return ignore_write(vcpu, p); 1425 } else { 1426 u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); 1427 u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); 1428 u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT); 1429 1430 p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | 1431 (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | 1432 (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) 1433 | (6 << 16) | (el3 << 14) | (el3 << 12)); 1434 return true; 1435 } 1436 } 1437 1438 static bool trap_debug32(struct kvm_vcpu *vcpu, 1439 struct sys_reg_params *p, 1440 const struct sys_reg_desc *r) 1441 { 1442 if (p->is_write) { 1443 vcpu_cp14(vcpu, r->reg) = p->regval; 1444 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 1445 } else { 1446 p->regval = vcpu_cp14(vcpu, r->reg); 1447 } 1448 1449 return true; 1450 } 1451 1452 /* AArch32 debug register mappings 1453 * 1454 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0] 1455 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32] 1456 * 1457 * All control registers and watchpoint value registers are mapped to 1458 * the lower 32 bits of their AArch64 equivalents. We share the trap 1459 * handlers with the above AArch64 code which checks what mode the 1460 * system is in. 1461 */ 1462 1463 static bool trap_xvr(struct kvm_vcpu *vcpu, 1464 struct sys_reg_params *p, 1465 const struct sys_reg_desc *rd) 1466 { 1467 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; 1468 1469 if (p->is_write) { 1470 u64 val = *dbg_reg; 1471 1472 val &= 0xffffffffUL; 1473 val |= p->regval << 32; 1474 *dbg_reg = val; 1475 1476 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 1477 } else { 1478 p->regval = *dbg_reg >> 32; 1479 } 1480 1481 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); 1482 1483 return true; 1484 } 1485 1486 #define DBG_BCR_BVR_WCR_WVR(n) \ 1487 /* DBGBVRn */ \ 1488 { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \ 1489 /* DBGBCRn */ \ 1490 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \ 1491 /* DBGWVRn */ \ 1492 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \ 1493 /* DBGWCRn */ \ 1494 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n } 1495 1496 #define DBGBXVR(n) \ 1497 { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n } 1498 1499 /* 1500 * Trapped cp14 registers. We generally ignore most of the external 1501 * debug, on the principle that they don't really make sense to a 1502 * guest. Revisit this one day, would this principle change. 
1503 */ 1504 static const struct sys_reg_desc cp14_regs[] = { 1505 /* DBGIDR */ 1506 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr }, 1507 /* DBGDTRRXext */ 1508 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, 1509 1510 DBG_BCR_BVR_WCR_WVR(0), 1511 /* DBGDSCRint */ 1512 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, 1513 DBG_BCR_BVR_WCR_WVR(1), 1514 /* DBGDCCINT */ 1515 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, 1516 /* DBGDSCRext */ 1517 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, 1518 DBG_BCR_BVR_WCR_WVR(2), 1519 /* DBGDTR[RT]Xint */ 1520 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, 1521 /* DBGDTR[RT]Xext */ 1522 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi }, 1523 DBG_BCR_BVR_WCR_WVR(3), 1524 DBG_BCR_BVR_WCR_WVR(4), 1525 DBG_BCR_BVR_WCR_WVR(5), 1526 /* DBGWFAR */ 1527 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi }, 1528 /* DBGOSECCR */ 1529 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, 1530 DBG_BCR_BVR_WCR_WVR(6), 1531 /* DBGVCR */ 1532 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, 1533 DBG_BCR_BVR_WCR_WVR(7), 1534 DBG_BCR_BVR_WCR_WVR(8), 1535 DBG_BCR_BVR_WCR_WVR(9), 1536 DBG_BCR_BVR_WCR_WVR(10), 1537 DBG_BCR_BVR_WCR_WVR(11), 1538 DBG_BCR_BVR_WCR_WVR(12), 1539 DBG_BCR_BVR_WCR_WVR(13), 1540 DBG_BCR_BVR_WCR_WVR(14), 1541 DBG_BCR_BVR_WCR_WVR(15), 1542 1543 /* DBGDRAR (32bit) */ 1544 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi }, 1545 1546 DBGBXVR(0), 1547 /* DBGOSLAR */ 1548 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi }, 1549 DBGBXVR(1), 1550 /* DBGOSLSR */ 1551 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 }, 1552 DBGBXVR(2), 1553 DBGBXVR(3), 1554 /* DBGOSDLR */ 1555 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi }, 1556 DBGBXVR(4), 1557 /* DBGPRCR */ 1558 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi }, 1559 DBGBXVR(5), 1560 DBGBXVR(6), 1561 DBGBXVR(7), 1562 DBGBXVR(8), 1563 DBGBXVR(9), 1564 DBGBXVR(10), 1565 DBGBXVR(11), 1566 DBGBXVR(12), 1567 DBGBXVR(13), 1568 DBGBXVR(14), 1569 DBGBXVR(15), 1570 1571 /* DBGDSAR (32bit) */ 1572 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi }, 1573 1574 /* DBGDEVID2 */ 1575 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi }, 1576 /* DBGDEVID1 */ 1577 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi }, 1578 /* DBGDEVID */ 1579 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi }, 1580 /* DBGCLAIMSET */ 1581 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi }, 1582 /* DBGCLAIMCLR */ 1583 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi }, 1584 /* DBGAUTHSTATUS */ 1585 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 }, 1586 }; 1587 1588 /* Trapped cp14 64bit registers */ 1589 static const struct sys_reg_desc cp14_64_regs[] = { 1590 /* DBGDRAR (64bit) */ 1591 { Op1( 0), CRm( 1), .access = trap_raz_wi }, 1592 1593 /* DBGDSAR (64bit) */ 1594 { Op1( 0), CRm( 2), .access = trap_raz_wi }, 1595 }; 1596 1597 /* Macro to expand the PMEVCNTRn register */ 1598 #define PMU_PMEVCNTR(n) \ 1599 /* PMEVCNTRn */ \ 1600 { Op1(0), CRn(0b1110), \ 1601 CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ 1602 access_pmu_evcntr } 1603 1604 /* Macro to expand the PMEVTYPERn register */ 1605 #define PMU_PMEVTYPER(n) \ 1606 /* PMEVTYPERn */ \ 1607 { Op1(0), CRn(0b1110), \ 1608 CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ 1609 access_pmu_evtyper } 1610 1611 /* 1612 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, 1613 * depending on the way they are accessed (as a 32bit or a 64bit 1614 * register). 
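 *
 * (Note, added for clarity: only the 32bit MCR/MRC encodings appear in
 * this table; the 64bit MCRR/MRRC encodings of TTBR0/TTBR1 are listed
 * separately in cp15_64_regs below.)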
1615 */ 1616 static const struct sys_reg_desc cp15_regs[] = { 1617 { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, 1618 1619 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, 1620 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, 1621 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, 1622 { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, 1623 { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, 1624 { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, 1625 { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, 1626 { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR }, 1627 { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR }, 1628 { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR }, 1629 { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR }, 1630 1631 /* 1632 * DC{C,I,CI}SW operations: 1633 */ 1634 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, 1635 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, 1636 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, 1637 1638 /* PMU */ 1639 { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr }, 1640 { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten }, 1641 { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten }, 1642 { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs }, 1643 { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc }, 1644 { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, 1645 { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, 1646 { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, 1647 { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, 1648 { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, 1649 { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, 1650 { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr }, 1651 { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, 1652 { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, 1653 { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs }, 1654 1655 { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, 1656 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, 1657 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, 1658 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, 1659 1660 /* ICC_SRE */ 1661 { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre }, 1662 1663 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, 1664 1665 /* CNTP_TVAL */ 1666 { Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval }, 1667 /* CNTP_CTL */ 1668 { Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl }, 1669 1670 /* PMEVCNTRn */ 1671 PMU_PMEVCNTR(0), 1672 PMU_PMEVCNTR(1), 1673 PMU_PMEVCNTR(2), 1674 PMU_PMEVCNTR(3), 1675 PMU_PMEVCNTR(4), 1676 PMU_PMEVCNTR(5), 1677 PMU_PMEVCNTR(6), 1678 PMU_PMEVCNTR(7), 1679 PMU_PMEVCNTR(8), 1680 PMU_PMEVCNTR(9), 1681 PMU_PMEVCNTR(10), 1682 PMU_PMEVCNTR(11), 1683 PMU_PMEVCNTR(12), 1684 PMU_PMEVCNTR(13), 1685 PMU_PMEVCNTR(14), 1686 PMU_PMEVCNTR(15), 1687 PMU_PMEVCNTR(16), 1688 PMU_PMEVCNTR(17), 1689 PMU_PMEVCNTR(18), 1690 PMU_PMEVCNTR(19), 1691 PMU_PMEVCNTR(20), 1692 PMU_PMEVCNTR(21), 1693 PMU_PMEVCNTR(22), 1694 PMU_PMEVCNTR(23), 1695 PMU_PMEVCNTR(24), 1696 PMU_PMEVCNTR(25), 1697 PMU_PMEVCNTR(26), 1698 PMU_PMEVCNTR(27), 1699 PMU_PMEVCNTR(28), 1700 PMU_PMEVCNTR(29), 1701 PMU_PMEVCNTR(30), 1702 /* PMEVTYPERn */ 1703 PMU_PMEVTYPER(0), 1704 
PMU_PMEVTYPER(1), 1705 PMU_PMEVTYPER(2), 1706 PMU_PMEVTYPER(3), 1707 PMU_PMEVTYPER(4), 1708 PMU_PMEVTYPER(5), 1709 PMU_PMEVTYPER(6), 1710 PMU_PMEVTYPER(7), 1711 PMU_PMEVTYPER(8), 1712 PMU_PMEVTYPER(9), 1713 PMU_PMEVTYPER(10), 1714 PMU_PMEVTYPER(11), 1715 PMU_PMEVTYPER(12), 1716 PMU_PMEVTYPER(13), 1717 PMU_PMEVTYPER(14), 1718 PMU_PMEVTYPER(15), 1719 PMU_PMEVTYPER(16), 1720 PMU_PMEVTYPER(17), 1721 PMU_PMEVTYPER(18), 1722 PMU_PMEVTYPER(19), 1723 PMU_PMEVTYPER(20), 1724 PMU_PMEVTYPER(21), 1725 PMU_PMEVTYPER(22), 1726 PMU_PMEVTYPER(23), 1727 PMU_PMEVTYPER(24), 1728 PMU_PMEVTYPER(25), 1729 PMU_PMEVTYPER(26), 1730 PMU_PMEVTYPER(27), 1731 PMU_PMEVTYPER(28), 1732 PMU_PMEVTYPER(29), 1733 PMU_PMEVTYPER(30), 1734 /* PMCCFILTR */ 1735 { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper }, 1736 }; 1737 1738 static const struct sys_reg_desc cp15_64_regs[] = { 1739 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, 1740 { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr }, 1741 { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, 1742 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, 1743 { Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval }, 1744 }; 1745 1746 /* Target specific emulation tables */ 1747 static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS]; 1748 1749 void kvm_register_target_sys_reg_table(unsigned int target, 1750 struct kvm_sys_reg_target_table *table) 1751 { 1752 target_tables[target] = table; 1753 } 1754 1755 /* Get specific register table for this target. */ 1756 static const struct sys_reg_desc *get_target_table(unsigned target, 1757 bool mode_is_64, 1758 size_t *num) 1759 { 1760 struct kvm_sys_reg_target_table *table; 1761 1762 table = target_tables[target]; 1763 if (mode_is_64) { 1764 *num = table->table64.num; 1765 return table->table64.table; 1766 } else { 1767 *num = table->table32.num; 1768 return table->table32.table; 1769 } 1770 } 1771 1772 #define reg_to_match_value(x) \ 1773 ({ \ 1774 unsigned long val; \ 1775 val = (x)->Op0 << 14; \ 1776 val |= (x)->Op1 << 11; \ 1777 val |= (x)->CRn << 7; \ 1778 val |= (x)->CRm << 3; \ 1779 val |= (x)->Op2; \ 1780 val; \ 1781 }) 1782 1783 static int match_sys_reg(const void *key, const void *elt) 1784 { 1785 const unsigned long pval = (unsigned long)key; 1786 const struct sys_reg_desc *r = elt; 1787 1788 return pval - reg_to_match_value(r); 1789 } 1790 1791 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, 1792 const struct sys_reg_desc table[], 1793 unsigned int num) 1794 { 1795 unsigned long pval = reg_to_match_value(params); 1796 1797 return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg); 1798 } 1799 1800 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) 1801 { 1802 kvm_inject_undefined(vcpu); 1803 return 1; 1804 } 1805 1806 static void perform_access(struct kvm_vcpu *vcpu, 1807 struct sys_reg_params *params, 1808 const struct sys_reg_desc *r) 1809 { 1810 /* 1811 * Not having an accessor means that we have configured a trap 1812 * that we don't know how to handle. This certainly qualifies 1813 * as a gross bug that should be fixed right away. 1814 */ 1815 BUG_ON(!r->access); 1816 1817 /* Skip instruction if instructed so */ 1818 if (likely(r->access(vcpu, params, r))) 1819 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 1820 } 1821 1822 /* 1823 * emulate_cp -- tries to match a sys_reg access in a handling table, and 1824 * call the corresponding trap handler. 
1825 *
1826 * @params: pointer to the descriptor of the access
1827 * @table: array of trap descriptors
1828 * @num: size of the trap descriptor array
1829 *
1830 * Return 0 if the access has been handled, and -1 if not.
1831 */
1832 static int emulate_cp(struct kvm_vcpu *vcpu,
1833 struct sys_reg_params *params,
1834 const struct sys_reg_desc *table,
1835 size_t num)
1836 {
1837 const struct sys_reg_desc *r;
1838
1839 if (!table)
1840 return -1; /* Not handled */
1841
1842 r = find_reg(params, table, num);
1843
1844 if (r) {
1845 perform_access(vcpu, params, r);
1846 return 0;
1847 }
1848
1849 /* Not handled */
1850 return -1;
1851 }
1852
1853 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
1854 struct sys_reg_params *params)
1855 {
1856 u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
1857 int cp = -1;
1858
1859 switch(hsr_ec) {
1860 case ESR_ELx_EC_CP15_32:
1861 case ESR_ELx_EC_CP15_64:
1862 cp = 15;
1863 break;
1864 case ESR_ELx_EC_CP14_MR:
1865 case ESR_ELx_EC_CP14_64:
1866 cp = 14;
1867 break;
1868 default:
1869 WARN_ON(1);
1870 }
1871
1872 kvm_err("Unsupported guest CP%d access at: %08lx\n",
1873 cp, *vcpu_pc(vcpu));
1874 print_sys_reg_instr(params);
1875 kvm_inject_undefined(vcpu);
1876 }
1877
1878 /**
1879 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
1880 * @vcpu: The VCPU pointer
1881 * @global, @nr_global, @target_specific, @nr_specific: trap tables to search, with their sizes
1882 */
1883 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1884 const struct sys_reg_desc *global,
1885 size_t nr_global,
1886 const struct sys_reg_desc *target_specific,
1887 size_t nr_specific)
1888 {
1889 struct sys_reg_params params;
1890 u32 hsr = kvm_vcpu_get_hsr(vcpu);
1891 int Rt = kvm_vcpu_sys_get_rt(vcpu);
1892 int Rt2 = (hsr >> 10) & 0x1f;
1893
1894 params.is_aarch32 = true;
1895 params.is_32bit = false;
1896 params.CRm = (hsr >> 1) & 0xf;
1897 params.is_write = ((hsr & 1) == 0);
1898
1899 params.Op0 = 0;
1900 params.Op1 = (hsr >> 16) & 0xf;
1901 params.Op2 = 0;
1902 params.CRn = 0;
1903
1904 /*
1905 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
1906 * backends between AArch32 and AArch64, we get away with it.
1907 */
1908 if (params.is_write) {
1909 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
1910 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
1911 }
1912
1913 /*
1914 * Try to emulate the coprocessor access using the target
1915 * specific table first, and using the global table afterwards.
1916 * If either of the tables contains a handler, handle the
1917 * potential register operation in the case of a read and return
1918 * with success.
1919 */
1920 if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1921 !emulate_cp(vcpu, &params, global, nr_global)) {
1922 /* Split up the value between registers for the read side */
1923 if (!params.is_write) {
1924 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
1925 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
1926 }
1927
1928 return 1;
1929 }
1930
1931 unhandled_cp_access(vcpu, &params);
1932 return 1;
1933 }
1934
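/*
 * A rough sketch of the ISS layout consumed by kvm_handle_cp_64() above,
 * as implied by the shifts used there:
 *
 *	hsr[19:16]  Op1
 *	hsr[14:10]  Rt2  (GPR holding the high word)
 *	hsr[ 9: 5]  Rt   (GPR holding the low word, via kvm_vcpu_sys_get_rt())
 *	hsr[ 4: 1]  CRm
 *	hsr[    0]  Direction (0 == write/mcrr, 1 == read/mrrc)
 *
 * So a guest "mcrr p15, 0, r2, r3, c2" (a 64-bit TTBR0 write, matched by
 * cp15_64_regs[] above; the GPR choices are illustrative) ends up as
 * params.regval = ((u64)r3 << 32) | r2, while on the read side
 * lower_32_bits()/upper_32_bits() split the handler's result back into
 * Rt and Rt2.
 */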
1935 /**
1936 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
1937 * @vcpu: The VCPU pointer
1938 * @global, @nr_global, @target_specific, @nr_specific: trap tables to search, with their sizes
1939 */
1940 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
1941 const struct sys_reg_desc *global,
1942 size_t nr_global,
1943 const struct sys_reg_desc *target_specific,
1944 size_t nr_specific)
1945 {
1946 struct sys_reg_params params;
1947 u32 hsr = kvm_vcpu_get_hsr(vcpu);
1948 int Rt = kvm_vcpu_sys_get_rt(vcpu);
1949
1950 params.is_aarch32 = true;
1951 params.is_32bit = true;
1952 params.CRm = (hsr >> 1) & 0xf;
1953 params.regval = vcpu_get_reg(vcpu, Rt);
1954 params.is_write = ((hsr & 1) == 0);
1955 params.CRn = (hsr >> 10) & 0xf;
1956 params.Op0 = 0;
1957 params.Op1 = (hsr >> 14) & 0x7;
1958 params.Op2 = (hsr >> 17) & 0x7;
1959
1960 if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1961 !emulate_cp(vcpu, &params, global, nr_global)) {
1962 if (!params.is_write)
1963 vcpu_set_reg(vcpu, Rt, params.regval);
1964 return 1;
1965 }
1966
1967 unhandled_cp_access(vcpu, &params);
1968 return 1;
1969 }
1970
1971 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
1972 {
1973 const struct sys_reg_desc *target_specific;
1974 size_t num;
1975
1976 target_specific = get_target_table(vcpu->arch.target, false, &num);
1977 return kvm_handle_cp_64(vcpu,
1978 cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
1979 target_specific, num);
1980 }
1981
1982 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1983 {
1984 const struct sys_reg_desc *target_specific;
1985 size_t num;
1986
1987 target_specific = get_target_table(vcpu->arch.target, false, &num);
1988 return kvm_handle_cp_32(vcpu,
1989 cp15_regs, ARRAY_SIZE(cp15_regs),
1990 target_specific, num);
1991 }
1992
1993 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
1994 {
1995 return kvm_handle_cp_64(vcpu,
1996 cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
1997 NULL, 0);
1998 }
1999
2000 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
2001 {
2002 return kvm_handle_cp_32(vcpu,
2003 cp14_regs, ARRAY_SIZE(cp14_regs),
2004 NULL, 0);
2005 }
2006
2007 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
2008 struct sys_reg_params *params)
2009 {
2010 size_t num;
2011 const struct sys_reg_desc *table, *r;
2012
2013 table = get_target_table(vcpu->arch.target, true, &num);
2014
2015 /* Search target-specific then generic table. */
2016 r = find_reg(params, table, num);
2017 if (!r)
2018 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2019
2020 if (likely(r)) {
2021 perform_access(vcpu, params, r);
2022 } else {
2023 kvm_err("Unsupported guest sys_reg access at: %lx\n",
2024 *vcpu_pc(vcpu));
2025 print_sys_reg_instr(params);
2026 kvm_inject_undefined(vcpu);
2027 }
2028 return 1;
2029 }
2030
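/*
 * Worked example of the lookup above: a trapped access to SCTLR_EL1
 * (Op0 = 3, Op1 = 0, CRn = 1, CRm = 0, Op2 = 0) is packed by
 * reg_to_match_value() into the key
 *
 *	(3 << 14) | (0 << 11) | (1 << 7) | (0 << 3) | 0 = 0xc080
 *
 * which bsearch() then looks up, first in the target-specific table and,
 * failing that, in the (Op0, Op1, CRn, CRm, Op2)-sorted sys_reg_descs[];
 * perform_access() finally runs the matching descriptor's ->access hook.
 */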
2031 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
2032 const struct sys_reg_desc *table, size_t num)
2033 {
2034 unsigned long i;
2035
2036 for (i = 0; i < num; i++)
2037 if (table[i].reset)
2038 table[i].reset(vcpu, &table[i]);
2039 }
2040
2041 /**
2042 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
2043 * @vcpu: The VCPU pointer
2044 * @run: The kvm_run struct
2045 */
2046 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
2047 {
2048 struct sys_reg_params params;
2049 unsigned long esr = kvm_vcpu_get_hsr(vcpu);
2050 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2051 int ret;
2052
2053 trace_kvm_handle_sys_reg(esr);
2054
2055 params.is_aarch32 = false;
2056 params.is_32bit = false;
2057 params.Op0 = (esr >> 20) & 3;
2058 params.Op1 = (esr >> 14) & 0x7;
2059 params.CRn = (esr >> 10) & 0xf;
2060 params.CRm = (esr >> 1) & 0xf;
2061 params.Op2 = (esr >> 17) & 0x7;
2062 params.regval = vcpu_get_reg(vcpu, Rt);
2063 params.is_write = !(esr & 1);
2064
2065 ret = emulate_sys_reg(vcpu, &params);
2066
2067 if (!params.is_write)
2068 vcpu_set_reg(vcpu, Rt, params.regval);
2069 return ret;
2070 }
2071
2072 /******************************************************************************
2073 * Userspace API
2074 *****************************************************************************/
2075
2076 static bool index_to_params(u64 id, struct sys_reg_params *params)
2077 {
2078 switch (id & KVM_REG_SIZE_MASK) {
2079 case KVM_REG_SIZE_U64:
2080 /* Any unused index bits mean it's not valid. */
2081 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
2082 | KVM_REG_ARM_COPROC_MASK
2083 | KVM_REG_ARM64_SYSREG_OP0_MASK
2084 | KVM_REG_ARM64_SYSREG_OP1_MASK
2085 | KVM_REG_ARM64_SYSREG_CRN_MASK
2086 | KVM_REG_ARM64_SYSREG_CRM_MASK
2087 | KVM_REG_ARM64_SYSREG_OP2_MASK))
2088 return false;
2089 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
2090 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
2091 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
2092 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
2093 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
2094 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
2095 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
2096 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
2097 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
2098 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
2099 return true;
2100 default:
2101 return false;
2102 }
2103 }
2104
2105 const struct sys_reg_desc *find_reg_by_id(u64 id,
2106 struct sys_reg_params *params,
2107 const struct sys_reg_desc table[],
2108 unsigned int num)
2109 {
2110 if (!index_to_params(id, params))
2111 return NULL;
2112
2113 return find_reg(params, table, num);
2114 }
2115
2116 /* Decode an index value, and find the sys_reg_desc entry. */
2117 static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
2118 u64 id)
2119 {
2120 size_t num;
2121 const struct sys_reg_desc *table, *r;
2122 struct sys_reg_params params;
2123
2124 /* We only do sys_reg for now. */
2125 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
2126 return NULL;
2127
2128 table = get_target_table(vcpu->arch.target, true, &num);
2129 r = find_reg_by_id(id, &params, table, num);
2130 if (!r)
2131 r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2132
2133 /* Not saved in the sys_reg array and not otherwise accessible? */
2134 if (r && !(r->reg || r->get_user))
2135 r = NULL;
2136
2137 return r;
2138 }
2139
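/*
 * A minimal userspace sketch of the index format decoded by
 * index_to_params() above, building the id for SCTLR_EL1 (Op0=3, Op1=0,
 * CRn=1, CRm=0, Op2=0). Illustrative only: "vcpu_fd" is assumed to be a
 * vcpu file descriptor obtained from KVM_CREATE_VCPU, and the constants
 * come from <linux/kvm.h>.
 *
 *	__u64 val;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
 *			(3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
 *			(0 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
 *			(1 << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
 *			(0 << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
 *			(0 << KVM_REG_ARM64_SYSREG_OP2_SHIFT),
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 *
 * The same id, used with KVM_SET_ONE_REG, funnels into
 * kvm_arm_sys_reg_set_reg() further down.
 */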
2140 /*
2141 * These are the invariant sys_reg registers: we let the guest see the
2142 * host versions of these, so they're part of the guest state.
2143 *
2144 * A future CPU may provide a mechanism to present different values to
2145 * the guest, or a future kvm may trap them.
2146 */
2147
2148 #define FUNCTION_INVARIANT(reg) \
2149 static void get_##reg(struct kvm_vcpu *v, \
2150 const struct sys_reg_desc *r) \
2151 { \
2152 ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
2153 }
2154
2155 FUNCTION_INVARIANT(midr_el1)
2156 FUNCTION_INVARIANT(ctr_el0)
2157 FUNCTION_INVARIANT(revidr_el1)
2158 FUNCTION_INVARIANT(clidr_el1)
2159 FUNCTION_INVARIANT(aidr_el1)
2160
2161 /* ->val is filled in by kvm_sys_reg_table_init() */
2162 static struct sys_reg_desc invariant_sys_regs[] = {
2163 { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
2164 { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
2165 { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
2166 { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
2167 { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
2168 };
2169
2170 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
2171 {
2172 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
2173 return -EFAULT;
2174 return 0;
2175 }
2176
2177 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
2178 {
2179 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
2180 return -EFAULT;
2181 return 0;
2182 }
2183
2184 static int get_invariant_sys_reg(u64 id, void __user *uaddr)
2185 {
2186 struct sys_reg_params params;
2187 const struct sys_reg_desc *r;
2188
2189 r = find_reg_by_id(id, &params, invariant_sys_regs,
2190 ARRAY_SIZE(invariant_sys_regs));
2191 if (!r)
2192 return -ENOENT;
2193
2194 return reg_to_user(uaddr, &r->val, id);
2195 }
2196
2197 static int set_invariant_sys_reg(u64 id, void __user *uaddr)
2198 {
2199 struct sys_reg_params params;
2200 const struct sys_reg_desc *r;
2201 int err;
2202 u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
2203
2204 r = find_reg_by_id(id, &params, invariant_sys_regs,
2205 ARRAY_SIZE(invariant_sys_regs));
2206 if (!r)
2207 return -ENOENT;
2208
2209 err = reg_from_user(&val, uaddr, id);
2210 if (err)
2211 return err;
2212
2213 /* This is what we mean by invariant: you can't change it. */
2214 if (r->val != val)
2215 return -EINVAL;
2216
2217 return 0;
2218 }
2219
2220 static bool is_valid_cache(u32 val)
2221 {
2222 u32 level, ctype;
2223
2224 if (val >= CSSELR_MAX)
2225 return false;
2226
2227 /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
2228 level = (val >> 1);
2229 ctype = (cache_levels >> (level * 3)) & 7;
2230
2231 switch (ctype) {
2232 case 0: /* No cache */
2233 return false;
2234 case 1: /* Instruction cache only */
2235 return (val & 1);
2236 case 2: /* Data cache only */
2237 case 4: /* Unified cache */
2238 return !(val & 1);
2239 case 3: /* Separate instruction and data caches */
2240 return true;
2241 default: /* Reserved: we can't know instruction or data.
*/ 2242 return false; 2243 } 2244 } 2245 2246 static int demux_c15_get(u64 id, void __user *uaddr) 2247 { 2248 u32 val; 2249 u32 __user *uval = uaddr; 2250 2251 /* Fail if we have unknown bits set. */ 2252 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK 2253 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) 2254 return -ENOENT; 2255 2256 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { 2257 case KVM_REG_ARM_DEMUX_ID_CCSIDR: 2258 if (KVM_REG_SIZE(id) != 4) 2259 return -ENOENT; 2260 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) 2261 >> KVM_REG_ARM_DEMUX_VAL_SHIFT; 2262 if (!is_valid_cache(val)) 2263 return -ENOENT; 2264 2265 return put_user(get_ccsidr(val), uval); 2266 default: 2267 return -ENOENT; 2268 } 2269 } 2270 2271 static int demux_c15_set(u64 id, void __user *uaddr) 2272 { 2273 u32 val, newval; 2274 u32 __user *uval = uaddr; 2275 2276 /* Fail if we have unknown bits set. */ 2277 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK 2278 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) 2279 return -ENOENT; 2280 2281 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { 2282 case KVM_REG_ARM_DEMUX_ID_CCSIDR: 2283 if (KVM_REG_SIZE(id) != 4) 2284 return -ENOENT; 2285 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) 2286 >> KVM_REG_ARM_DEMUX_VAL_SHIFT; 2287 if (!is_valid_cache(val)) 2288 return -ENOENT; 2289 2290 if (get_user(newval, uval)) 2291 return -EFAULT; 2292 2293 /* This is also invariant: you can't change it. */ 2294 if (newval != get_ccsidr(val)) 2295 return -EINVAL; 2296 return 0; 2297 default: 2298 return -ENOENT; 2299 } 2300 } 2301 2302 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 2303 { 2304 const struct sys_reg_desc *r; 2305 void __user *uaddr = (void __user *)(unsigned long)reg->addr; 2306 2307 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 2308 return demux_c15_get(reg->id, uaddr); 2309 2310 if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) 2311 return -ENOENT; 2312 2313 r = index_to_sys_reg_desc(vcpu, reg->id); 2314 if (!r) 2315 return get_invariant_sys_reg(reg->id, uaddr); 2316 2317 if (r->get_user) 2318 return (r->get_user)(vcpu, r, reg, uaddr); 2319 2320 return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id); 2321 } 2322 2323 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 2324 { 2325 const struct sys_reg_desc *r; 2326 void __user *uaddr = (void __user *)(unsigned long)reg->addr; 2327 2328 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 2329 return demux_c15_set(reg->id, uaddr); 2330 2331 if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) 2332 return -ENOENT; 2333 2334 r = index_to_sys_reg_desc(vcpu, reg->id); 2335 if (!r) 2336 return set_invariant_sys_reg(reg->id, uaddr); 2337 2338 if (r->set_user) 2339 return (r->set_user)(vcpu, r, reg, uaddr); 2340 2341 return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id); 2342 } 2343 2344 static unsigned int num_demux_regs(void) 2345 { 2346 unsigned int i, count = 0; 2347 2348 for (i = 0; i < CSSELR_MAX; i++) 2349 if (is_valid_cache(i)) 2350 count++; 2351 2352 return count; 2353 } 2354 2355 static int write_demux_regids(u64 __user *uindices) 2356 { 2357 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; 2358 unsigned int i; 2359 2360 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; 2361 for (i = 0; i < CSSELR_MAX; i++) { 2362 if (!is_valid_cache(i)) 2363 continue; 2364 if (put_user(val | i, uindices)) 2365 return -EFAULT; 2366 uindices++; 2367 } 2368 return 0; 2369 } 2370 2371 static u64 sys_reg_to_index(const struct sys_reg_desc 
*reg) 2372 { 2373 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | 2374 KVM_REG_ARM64_SYSREG | 2375 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | 2376 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | 2377 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | 2378 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | 2379 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); 2380 } 2381 2382 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) 2383 { 2384 if (!*uind) 2385 return true; 2386 2387 if (put_user(sys_reg_to_index(reg), *uind)) 2388 return false; 2389 2390 (*uind)++; 2391 return true; 2392 } 2393 2394 static int walk_one_sys_reg(const struct sys_reg_desc *rd, 2395 u64 __user **uind, 2396 unsigned int *total) 2397 { 2398 /* 2399 * Ignore registers we trap but don't save, 2400 * and for which no custom user accessor is provided. 2401 */ 2402 if (!(rd->reg || rd->get_user)) 2403 return 0; 2404 2405 if (!copy_reg_to_user(rd, uind)) 2406 return -EFAULT; 2407 2408 (*total)++; 2409 return 0; 2410 } 2411 2412 /* Assumed ordered tables, see kvm_sys_reg_table_init. */ 2413 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) 2414 { 2415 const struct sys_reg_desc *i1, *i2, *end1, *end2; 2416 unsigned int total = 0; 2417 size_t num; 2418 int err; 2419 2420 /* We check for duplicates here, to allow arch-specific overrides. */ 2421 i1 = get_target_table(vcpu->arch.target, true, &num); 2422 end1 = i1 + num; 2423 i2 = sys_reg_descs; 2424 end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs); 2425 2426 BUG_ON(i1 == end1 || i2 == end2); 2427 2428 /* Walk carefully, as both tables may refer to the same register. */ 2429 while (i1 || i2) { 2430 int cmp = cmp_sys_reg(i1, i2); 2431 /* target-specific overrides generic entry. */ 2432 if (cmp <= 0) 2433 err = walk_one_sys_reg(i1, &uind, &total); 2434 else 2435 err = walk_one_sys_reg(i2, &uind, &total); 2436 2437 if (err) 2438 return err; 2439 2440 if (cmp <= 0 && ++i1 == end1) 2441 i1 = NULL; 2442 if (cmp >= 0 && ++i2 == end2) 2443 i2 = NULL; 2444 } 2445 return total; 2446 } 2447 2448 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) 2449 { 2450 return ARRAY_SIZE(invariant_sys_regs) 2451 + num_demux_regs() 2452 + walk_sys_regs(vcpu, (u64 __user *)NULL); 2453 } 2454 2455 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) 2456 { 2457 unsigned int i; 2458 int err; 2459 2460 /* Then give them all the invariant registers' indices. */ 2461 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) { 2462 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices)) 2463 return -EFAULT; 2464 uindices++; 2465 } 2466 2467 err = walk_sys_regs(vcpu, uindices); 2468 if (err < 0) 2469 return err; 2470 uindices += err; 2471 2472 return write_demux_regids(uindices); 2473 } 2474 2475 static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n) 2476 { 2477 unsigned int i; 2478 2479 for (i = 1; i < n; i++) { 2480 if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) { 2481 kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1); 2482 return 1; 2483 } 2484 } 2485 2486 return 0; 2487 } 2488 2489 void kvm_sys_reg_table_init(void) 2490 { 2491 unsigned int i; 2492 struct sys_reg_desc clidr; 2493 2494 /* Make sure tables are unique and in order. 
*/ 2495 BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs))); 2496 BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs))); 2497 BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs))); 2498 BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs))); 2499 BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs))); 2500 BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs))); 2501 2502 /* We abuse the reset function to overwrite the table itself. */ 2503 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) 2504 invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]); 2505 2506 /* 2507 * CLIDR format is awkward, so clean it up. See ARM B4.1.20: 2508 * 2509 * If software reads the Cache Type fields from Ctype1 2510 * upwards, once it has seen a value of 0b000, no caches 2511 * exist at further-out levels of the hierarchy. So, for 2512 * example, if Ctype3 is the first Cache Type field with a 2513 * value of 0b000, the values of Ctype4 to Ctype7 must be 2514 * ignored. 2515 */ 2516 get_clidr_el1(NULL, &clidr); /* Ugly... */ 2517 cache_levels = clidr.val; 2518 for (i = 0; i < 7; i++) 2519 if (((cache_levels >> (i*3)) & 7) == 0) 2520 break; 2521 /* Clear all higher bits. */ 2522 cache_levels &= (1 << (i*3))-1; 2523 } 2524 2525 /** 2526 * kvm_reset_sys_regs - sets system registers to reset value 2527 * @vcpu: The VCPU pointer 2528 * 2529 * This function finds the right table above and sets the registers on the 2530 * virtual CPU struct to their architecturally defined reset values. 2531 */ 2532 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) 2533 { 2534 size_t num; 2535 const struct sys_reg_desc *table; 2536 2537 /* Catch someone adding a register without putting in reset entry. */ 2538 memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); 2539 2540 /* Generic chip reset first (so target could override). */ 2541 reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 2542 2543 table = get_target_table(vcpu->arch.target, true, &num); 2544 reset_sys_reg_descs(vcpu, table, num); 2545 2546 for (num = 1; num < NR_SYS_REGS; num++) 2547 if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) 2548 panic("Didn't reset __vcpu_sys_reg(%zi)", num); 2549 } 2550
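/*
 * Worked example for the cache_levels/CSSELR handling set up in
 * kvm_sys_reg_table_init() above, assuming a hypothetical CLIDR_EL1 with
 * Ctype1 = 0b011 (separate L1 I and D caches) and Ctype2 = 0b100
 * (unified L2), i.e. cache_levels = 0x23:
 *
 *	is_valid_cache(0): level 0, ctype 3 -> true   (L1 data)
 *	is_valid_cache(1): level 0, ctype 3 -> true   (L1 instruction)
 *	is_valid_cache(2): level 1, ctype 4 -> true   (L2 unified)
 *	is_valid_cache(3): level 1, ctype 4 -> false  (no L2 I-side view)
 *	is_valid_cache(4): level 2, ctype 0 -> false  (no L3 cache)
 *
 * Only the CCSIDR values behind the valid selectors are exposed to
 * userspace through the KVM_REG_ARM_DEMUX index space.
 */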