/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_read;

	/*
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
	case SCTLR_EL1:		return read_sysreg_s(sctlr_EL12);
	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
	case CPACR_EL1:		return read_sysreg_s(cpacr_EL12);
	case TTBR0_EL1:		return read_sysreg_s(ttbr0_EL12);
	case TTBR1_EL1:		return read_sysreg_s(ttbr1_EL12);
	case TCR_EL1:		return read_sysreg_s(tcr_EL12);
	case ESR_EL1:		return read_sysreg_s(esr_EL12);
	case AFSR0_EL1:		return read_sysreg_s(afsr0_EL12);
	case AFSR1_EL1:		return read_sysreg_s(afsr1_EL12);
	case FAR_EL1:		return read_sysreg_s(far_EL12);
	case MAIR_EL1:		return read_sysreg_s(mair_EL12);
	case VBAR_EL1:		return read_sysreg_s(vbar_EL12);
	case CONTEXTIDR_EL1:	return read_sysreg_s(contextidr_EL12);
	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
	case AMAIR_EL1:		return read_sysreg_s(amair_EL12);
	case CNTKCTL_EL1:	return read_sysreg_s(cntkctl_EL12);
	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
	}

immediate_read:
	return __vcpu_sys_reg(vcpu, reg);
}
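/*
 * Illustrative usage sketch (not part of the file's own logic): trap
 * handling code that wants to inspect or update guest state should go
 * through vcpu_read_sys_reg()/vcpu_write_sys_reg() rather than poking
 * __vcpu_sys_reg() directly, e.g. something along the lines of
 *
 *	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
 *	vcpu_write_sys_reg(vcpu, tcr, TCR_EL1);
 *
 * so that the value comes from (and goes to) the CPU register when the
 * sysreg state is currently loaded on the CPU, and from the in-memory
 * copy otherwise.
 */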
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_write;

	/*
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be
	 * set once, before running the VCPU, and never changed later.
	 */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
	case SCTLR_EL1:		write_sysreg_s(val, sctlr_EL12);	return;
	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
	case CPACR_EL1:		write_sysreg_s(val, cpacr_EL12);	return;
	case TTBR0_EL1:		write_sysreg_s(val, ttbr0_EL12);	return;
	case TTBR1_EL1:		write_sysreg_s(val, ttbr1_EL12);	return;
	case TCR_EL1:		write_sysreg_s(val, tcr_EL12);		return;
	case ESR_EL1:		write_sysreg_s(val, esr_EL12);		return;
	case AFSR0_EL1:		write_sysreg_s(val, afsr0_EL12);	return;
	case AFSR1_EL1:		write_sysreg_s(val, afsr1_EL12);	return;
	case FAR_EL1:		write_sysreg_s(val, far_EL12);		return;
	case MAIR_EL1:		write_sysreg_s(val, mair_EL12);		return;
	case VBAR_EL1:		write_sysreg_s(val, vbar_EL12);		return;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, contextidr_EL12);	return;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
	case AMAIR_EL1:		write_sysreg_s(val, amair_EL12);	return;
	case CNTKCTL_EL1:	write_sysreg_s(val, cntkctl_EL12);	return;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
	}

immediate_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* See the 32bit mapping in kvm_host.h */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		val = vcpu_read_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}
	vcpu_write_sys_reg(vcpu, val, reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
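/*
 * Worked example for the 32bit path in access_vm_reg() above (values
 * are illustrative): a 32bit write through a mapping whose 32bit index
 * is odd only replaces the top 32 bits of the underlying 64bit
 * register, keeping the existing low half; an even index replaces the
 * low half and keeps the top one. This is what keeps the AArch32 and
 * AArch64 views of the same underlying register consistent.
 */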
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
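/*
 * For illustration: once a guest write to, say, DBGBCR0_EL1 has been
 * handled by the handlers below, KVM_ARM64_DEBUG_DIRTY is set, so the
 * next guest entry takes the "save/restore the full debug state" path
 * described above and stops trapping these registers (see the debug
 * setup code in debug.c).
 */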
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
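/*
 * Worked example for reset_mpidr() above (illustrative): vcpu_id 0x123
 * becomes Aff0 = 0x03, Aff1 = 0x12, Aff2 = 0x00, and with bit 31 (RES1)
 * set the guest sees MPIDR_EL1 == 0x80001203.
 */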
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E, which resets to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
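/*
 * Note on the index decoding used below (illustrative): the
 * PMEVCNTRn_EL0/PMEVTYPERn_EL0 encodings pack the counter number into
 * CRm[1:0] and Op2[2:0], so e.g. PMEVCNTR10_EL0 (CRm = 0b1001,
 * Op2 = 0b010) decodes as idx = ((1 & 3) << 3) | 2 = 10. The cycle
 * counter uses the fixed ARMV8_PMU_CYCLE_IDX slot instead.
 */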
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}
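/*
 * As with the PMCNTEN{SET,CLR} and PMINTEN{SET,CLR} pairs above, the
 * overflow status handled below is kept in a single shadow register
 * (PMOVSSET_EL0): writes via the SET encoding OR bits in, writes via
 * the CLR encoding clear them, and reads of either encoding return the
 * same accumulated value, masked to the valid counters.
 */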
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

static bool access_cntp_tval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	u64 now = kvm_phys_timer_read();
	u64 cval;

	if (p->is_write) {
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
				      p->regval + now);
	} else {
		cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
		p->regval = cval - now;
	}

	return true;
}

static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);

	return true;
}

static bool access_cntp_cval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);

	return true;
}
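/*
 * Illustrative relation between the two physical timer views handled
 * above: TVAL is the distance between CVAL and the current counter, so
 * a guest write of 1000 to CNTP_TVAL_EL0 when the counter reads 5000 is
 * stored as CVAL = 6000, and a later TVAL read returns CVAL minus
 * whatever the counter has advanced to in the meantime.
 */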
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1) {
		if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
			kvm_debug("SVE unsupported for guests, suppressing\n");

		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
	} else if (id == SYS_ID_AA64MMFR1_EL1) {
		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
			kvm_debug("LORegions unsupported for guests, suppressing\n");

		val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
	}

	return val;
}

/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(r, raz);
	return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, false);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(rd, raz);

	return reg_to_user(uaddr, &val, id);
}
static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(rd, raz))
		return -EINVAL;

	return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, false);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, false);
}

static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, true);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, true);
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}
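/*
 * For illustration, ID_SANITISED(ID_AA64PFR0_EL1) in the table below
 * expands to roughly:
 *
 *	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
 *	  .access   = access_id_reg,
 *	  .get_user = get_id_reg,
 *	  .set_user = set_id_reg, },
 *
 * i.e. the guest and userspace both see the sanitised, system-wide
 * value computed by the cpufeature code, with no per-vcpu storage.
 */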
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OSLock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_UNALLOCATED(2,7),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_UNALLOCATED(3,4),
	ID_UNALLOCATED(3,5),
	ID_UNALLOCATED(3,6),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	ID_SANITISED(ID_AA64PFR0_EL1),
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_UNALLOCATED(4,4),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_UNALLOCATED(6,2),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_undef },
	{ SYS_DESC(SYS_LOREA_EL1), trap_undef },
	{ SYS_DESC(SYS_LORN_EL1), trap_undef },
	{ SYS_DESC(SYS_LORC_EL1), trap_undef },
	{ SYS_DESC(SYS_LORID_EL1), trap_undef },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },

	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */

static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
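/*
 * Worked example for trap_xvr() above (values are illustrative): with
 * DBGBVR1_EL1 currently holding 0xffff000000001234, an AArch32 write of
 * 0x0000ffff to DBGBXVR1 yields 0x0000ffff00001234, and a subsequent
 * DBGBXVR1 read returns 0x0000ffff; the low word remains accessible
 * only through DBGBVR1.
 */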
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* CNTP_TVAL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
	/* CNTP_CTL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	 })

static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}
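/*
 * For reference, reg_to_match_value() above packs the encoding into a
 * single comparable key: Op0 in bits [15:14], Op1 in [13:11], CRn in
 * [10:7], CRm in [6:3] and Op2 in [2:0]. The bsearch() in find_reg()
 * only works because the tables are kept sorted by
 * (Op0, Op1, CRn, CRm, Op2), as noted above sys_reg_descs[].
 */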
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
                      struct sys_reg_params *params,
                      const struct sys_reg_desc *table,
                      size_t num)
{
        const struct sys_reg_desc *r;

        if (!table)
                return -1;      /* Not handled */

        r = find_reg(params, table, num);

        if (r) {
                perform_access(vcpu, params, r);
                return 0;
        }

        /* Not handled */
        return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
                                struct sys_reg_params *params)
{
        u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
        int cp = -1;

        switch (hsr_ec) {
        case ESR_ELx_EC_CP15_32:
        case ESR_ELx_EC_CP15_64:
                cp = 15;
                break;
        case ESR_ELx_EC_CP14_MR:
        case ESR_ELx_EC_CP14_64:
                cp = 14;
                break;
        default:
                WARN_ON(1);
        }

        kvm_err("Unsupported guest CP%d access at: %08lx\n",
                cp, *vcpu_pc(vcpu));
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: Pointer to the global (non target-specific) trap descriptor table
 * @nr_global: Number of entries in the global table
 * @target_specific: Pointer to the target-specific trap descriptor table
 * @nr_specific: Number of entries in the target-specific table
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
                            const struct sys_reg_desc *global,
                            size_t nr_global,
                            const struct sys_reg_desc *target_specific,
                            size_t nr_specific)
{
        struct sys_reg_params params;
        u32 hsr = kvm_vcpu_get_hsr(vcpu);
        int Rt = kvm_vcpu_sys_get_rt(vcpu);
        int Rt2 = (hsr >> 10) & 0x1f;

        params.is_aarch32 = true;
        params.is_32bit = false;
        params.CRm = (hsr >> 1) & 0xf;
        params.is_write = ((hsr & 1) == 0);

        params.Op0 = 0;
        params.Op1 = (hsr >> 16) & 0xf;
        params.Op2 = 0;
        params.CRn = 0;

        /*
         * Make a 64-bit value out of Rt and Rt2. As we use the same trap
         * backends between AArch32 and AArch64, we get away with it.
         */
        if (params.is_write) {
                params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
                params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
        }

        /*
         * Try to emulate the coprocessor access using the target
         * specific table first, and using the global table afterwards.
         * If either of the tables contains a handler, handle the
         * potential register operation in the case of a read and return
         * with success.
         */
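        /*
         * For example, a guest MCRR write to the 64-bit CNTP_CVAL
         * (Op1 == 2, CRm == 14) arrives here with params.regval already
         * holding the combined Rt/Rt2 value, and is matched below by the
         * access_cntp_cval entry in cp15_64_regs.
         */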
        if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
            !emulate_cp(vcpu, &params, global, nr_global)) {
                /* Split up the value between registers for the read side */
                if (!params.is_write) {
                        vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
                        vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
                }

                return 1;
        }

        unhandled_cp_access(vcpu, &params);
        return 1;
}

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: Pointer to the global (non target-specific) trap descriptor table
 * @nr_global: Number of entries in the global table
 * @target_specific: Pointer to the target-specific trap descriptor table
 * @nr_specific: Number of entries in the target-specific table
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
                            const struct sys_reg_desc *global,
                            size_t nr_global,
                            const struct sys_reg_desc *target_specific,
                            size_t nr_specific)
{
        struct sys_reg_params params;
        u32 hsr = kvm_vcpu_get_hsr(vcpu);
        int Rt = kvm_vcpu_sys_get_rt(vcpu);

        params.is_aarch32 = true;
        params.is_32bit = true;
        params.CRm = (hsr >> 1) & 0xf;
        params.regval = vcpu_get_reg(vcpu, Rt);
        params.is_write = ((hsr & 1) == 0);
        params.CRn = (hsr >> 10) & 0xf;
        params.Op0 = 0;
        params.Op1 = (hsr >> 14) & 0x7;
        params.Op2 = (hsr >> 17) & 0x7;

        if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
            !emulate_cp(vcpu, &params, global, nr_global)) {
                if (!params.is_write)
                        vcpu_set_reg(vcpu, Rt, params.regval);
                return 1;
        }

        unhandled_cp_access(vcpu, &params);
        return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        const struct sys_reg_desc *target_specific;
        size_t num;

        target_specific = get_target_table(vcpu->arch.target, false, &num);
        return kvm_handle_cp_64(vcpu,
                                cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
                                target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        const struct sys_reg_desc *target_specific;
        size_t num;

        target_specific = get_target_table(vcpu->arch.target, false, &num);
        return kvm_handle_cp_32(vcpu,
                                cp15_regs, ARRAY_SIZE(cp15_regs),
                                target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        return kvm_handle_cp_64(vcpu,
                                cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
                                NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        return kvm_handle_cp_32(vcpu,
                                cp14_regs, ARRAY_SIZE(cp14_regs),
                                NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *params)
{
        size_t num;
        const struct sys_reg_desc *table, *r;

        table = get_target_table(vcpu->arch.target, true, &num);

        /* Search target-specific then generic table. */
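        /*
         * Sketch of the lookup: both tables are sorted by the packed
         * (Op0, Op1, CRn, CRm, Op2) encoding, so find_reg() can bsearch()
         * them. For an MRS from SCTLR_EL1 (Op0=3, Op1=0, CRn=1, CRm=0,
         * Op2=0), reg_to_match_value() yields (3 << 14) | (1 << 7) = 0xc080,
         * and the first table with a matching descriptor wins.
         */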
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        if (likely(r)) {
                perform_access(vcpu, params, r);
        } else {
                kvm_err("Unsupported guest sys_reg access at: %lx\n",
                        *vcpu_pc(vcpu));
                print_sys_reg_instr(params);
                kvm_inject_undefined(vcpu);
        }
        return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
                                const struct sys_reg_desc *table, size_t num)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct sys_reg_params params;
        unsigned long esr = kvm_vcpu_get_hsr(vcpu);
        int Rt = kvm_vcpu_sys_get_rt(vcpu);
        int ret;

        trace_kvm_handle_sys_reg(esr);

        params.is_aarch32 = false;
        params.is_32bit = false;
        params.Op0 = (esr >> 20) & 3;
        params.Op1 = (esr >> 14) & 0x7;
        params.CRn = (esr >> 10) & 0xf;
        params.CRm = (esr >> 1) & 0xf;
        params.Op2 = (esr >> 17) & 0x7;
        params.regval = vcpu_get_reg(vcpu, Rt);
        params.is_write = !(esr & 1);

        ret = emulate_sys_reg(vcpu, &params);

        if (!params.is_write)
                vcpu_set_reg(vcpu, Rt, params.regval);
        return ret;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U64:
                /* Any unused index bits means it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                           | KVM_REG_ARM_COPROC_MASK
                           | KVM_REG_ARM64_SYSREG_OP0_MASK
                           | KVM_REG_ARM64_SYSREG_OP1_MASK
                           | KVM_REG_ARM64_SYSREG_CRN_MASK
                           | KVM_REG_ARM64_SYSREG_CRM_MASK
                           | KVM_REG_ARM64_SYSREG_OP2_MASK))
                        return false;
                params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
                params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
                params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
                params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
                return true;
        default:
                return false;
        }
}

const struct sys_reg_desc *find_reg_by_id(u64 id,
                                          struct sys_reg_params *params,
                                          const struct sys_reg_desc table[],
                                          unsigned int num)
{
        if (!index_to_params(id, params))
                return NULL;

        return find_reg(params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
                                                        u64 id)
{
        size_t num;
        const struct sys_reg_desc *table, *r;
        struct sys_reg_params params;

        /* We only do sys_reg for now. */
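        /*
         * For reference (assuming the uapi KVM_REG_ARM64_SYSREG_*_SHIFT
         * values mirror reg_to_match_value() above): the index userspace
         * passes packs Op0/Op1/CRn/CRm/Op2 into its low 16 bits, so the
         * KVM_GET_ONE_REG id for SCTLR_EL1 ends in 0xc080 and decodes via
         * index_to_params() back into the same sys_reg_params used at
         * trap time.
         */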
        if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
                return NULL;

        table = get_target_table(vcpu->arch.target, true, &num);
        r = find_reg_by_id(id, &params, table, num);
        if (!r)
                r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        /* Not saved in the sys_reg array and not otherwise accessible? */
        if (r && !(r->reg || r->get_user))
                r = NULL;

        return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)                                         \
        static void get_##reg(struct kvm_vcpu *v,                      \
                              const struct sys_reg_desc *r)            \
        {                                                               \
                ((struct sys_reg_desc *)r)->val = read_sysreg(reg);    \
        }

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
        { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
        { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
        { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
        { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
        { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;

        r = find_reg_by_id(id, &params, invariant_sys_regs,
                           ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;
        int err;
        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

        r = find_reg_by_id(id, &params, invariant_sys_regs,
                           ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        err = reg_from_user(&val, uaddr, id);
        if (err)
                return err;

        /* This is what we mean by invariant: you can't change it. */
        if (r->val != val)
                return -EINVAL;

        return 0;
}

static bool is_valid_cache(u32 val)
{
        u32 level, ctype;

        if (val >= CSSELR_MAX)
                return false;

        /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
        level = (val >> 1);
        ctype = (cache_levels >> (level * 3)) & 7;

        switch (ctype) {
        case 0: /* No cache */
                return false;
        case 1: /* Instruction cache only */
                return (val & 1);
        case 2: /* Data cache only */
        case 4: /* Unified cache */
                return !(val & 1);
        case 3: /* Separate instruction and data caches */
                return true;
        default: /* Reserved: we can't know instruction or data. */
                return false;
        }
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
        u32 val;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                return put_user(get_ccsidr(val), uval);
        default:
                return -ENOENT;
        }
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
        u32 val, newval;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                if (get_user(newval, uval))
                        return -EFAULT;

                /* This is also invariant: you can't change it. */
                if (newval != get_ccsidr(val))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return get_invariant_sys_reg(reg->id, uaddr);

        if (r->get_user)
                return (r->get_user)(vcpu, r, reg, uaddr);

        return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return set_invariant_sys_reg(reg->id, uaddr);

        if (r->set_user)
                return (r->set_user)(vcpu, r, reg, uaddr);

        return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
        unsigned int i, count = 0;

        for (i = 0; i < CSSELR_MAX; i++)
                if (is_valid_cache(i))
                        count++;

        return count;
}

static int write_demux_regids(u64 __user *uindices)
{
        u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
        unsigned int i;

        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
        for (i = 0; i < CSSELR_MAX; i++) {
                if (!is_valid_cache(i))
                        continue;
                if (put_user(val | i, uindices))
                        return -EFAULT;
                uindices++;
        }
        return 0;
}

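/*
 * Build the user-visible index for a descriptor: arch and size flags in the
 * top bits, KVM_REG_ARM64_SYSREG selecting the sysreg "coproc" space, and
 * the Op0/Op1/CRn/CRm/Op2 encoding in the low bits. Effectively the inverse
 * of index_to_params() above, so each register gets one stable 64-bit id.
 */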
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
        return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
                KVM_REG_ARM64_SYSREG |
                (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
                (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
                (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
                (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
                (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
        if (!*uind)
                return true;

        if (put_user(sys_reg_to_index(reg), *uind))
                return false;

        (*uind)++;
        return true;
}

static int walk_one_sys_reg(const struct sys_reg_desc *rd,
                            u64 __user **uind,
                            unsigned int *total)
{
        /*
         * Ignore registers we trap but don't save,
         * and for which no custom user accessor is provided.
         */
        if (!(rd->reg || rd->get_user))
                return 0;

        if (!copy_reg_to_user(rd, uind))
                return -EFAULT;

        (*total)++;
        return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
        const struct sys_reg_desc *i1, *i2, *end1, *end2;
        unsigned int total = 0;
        size_t num;
        int err;

        /* We check for duplicates here, to allow arch-specific overrides. */
        i1 = get_target_table(vcpu->arch.target, true, &num);
        end1 = i1 + num;
        i2 = sys_reg_descs;
        end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

        BUG_ON(i1 == end1 || i2 == end2);

        /* Walk carefully, as both tables may refer to the same register. */
        while (i1 || i2) {
                int cmp = cmp_sys_reg(i1, i2);
                /* target-specific overrides generic entry. */
                if (cmp <= 0)
                        err = walk_one_sys_reg(i1, &uind, &total);
                else
                        err = walk_one_sys_reg(i2, &uind, &total);

                if (err)
                        return err;

                if (cmp <= 0 && ++i1 == end1)
                        i1 = NULL;
                if (cmp >= 0 && ++i2 == end2)
                        i2 = NULL;
        }
        return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(invariant_sys_regs)
                + num_demux_regs()
                + walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        int err;

        /* Then give them all the invariant registers' indices. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
                if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
                        return -EFAULT;
                uindices++;
        }

        err = walk_sys_regs(vcpu, uindices);
        if (err < 0)
                return err;
        uindices += err;

        return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
        unsigned int i;

        for (i = 1; i < n; i++) {
                if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
                        kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
                        return 1;
                }
        }

        return 0;
}

void kvm_sys_reg_table_init(void)
{
        unsigned int i;
        struct sys_reg_desc clidr;

        /* Make sure tables are unique and in order. */
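        /*
         * find_reg() bsearch()es these tables at trap time, so each one
         * must be strictly ordered by its encoding; check_sysreg_table()
         * catches any out-of-order or duplicate entry before a guest runs.
         */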
        BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
        BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
        BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
        BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
        BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
        BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
                invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

        /*
         * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
         *
         *   If software reads the Cache Type fields from Ctype1
         *   upwards, once it has seen a value of 0b000, no caches
         *   exist at further-out levels of the hierarchy. So, for
         *   example, if Ctype3 is the first Cache Type field with a
         *   value of 0b000, the values of Ctype4 to Ctype7 must be
         *   ignored.
         */
        get_clidr_el1(NULL, &clidr); /* Ugly... */
        cache_levels = clidr.val;
        for (i = 0; i < 7; i++)
                if (((cache_levels >> (i*3)) & 7) == 0)
                        break;
        /* Clear all higher bits. */
        cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct sys_reg_desc *table;

        /* Catch someone adding a register without putting in reset entry. */
        memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

        /* Generic chip reset first (so target could override). */
        reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        table = get_target_table(vcpu->arch.target, true, &num);
        reset_sys_reg_descs(vcpu, table, num);

        for (num = 1; num < NR_SYS_REGS; num++)
                if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
                        panic("Didn't reset __vcpu_sys_reg(%zi)", num);
}