/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <trace/events/kvm.h>

#include "sys_regs.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  const struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	unsigned long val;
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	val = *vcpu_reg(vcpu, p->Rt);
	if (!p->is_aarch32) {
		vcpu_sys_reg(vcpu, r->reg) = val;
	} else {
		if (!p->is_32bit)
			vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		*vcpu_reg(vcpu, p->Rt) = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   const struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u32 val;
		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
		*vcpu_reg(vcpu, p->Rt) = val;
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance.
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps.
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    const struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
	}

	return true;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/*
	 * Simply map the vcpu_id into the Aff0 field of the MPIDR.
	 */
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug, nor the OSlock
 * protocol. This should be revisited if we ever encounter a more
 * demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	/* MDCCINT_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	/* MDSCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	/* MDRAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  trap_raz_wi },
	/* OSLAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
	  trap_raz_wi },
	/* OSLSR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
	  trap_oslsr_el1 },
	/* OSDLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
	  trap_raz_wi },
	/* DBGPRCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
	  trap_raz_wi },
	/* DBGCLAIMSET_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
	  trap_raz_wi },
	/* DBGCLAIMCLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
	  trap_raz_wi },
	/* DBGAUTHSTATUS_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
	  trap_dbgauthstatus_el1 },

	/* TEECR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEECR32_EL1, 0 },
	/* TEEHBR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEEHBR32_EL1, 0 },

	/* MDCCSR_EL1 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR[TR]X_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
	  trap_raz_wi },

	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  trap_raz_wi },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  trap_raz_wi },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },

	/* ICC_SRE_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },

	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0 },

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  trap_raz_wi },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  trap_raz_wi },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  trap_raz_wi },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  trap_raz_wi },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  trap_raz_wi },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  trap_raz_wi },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  trap_raz_wi },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  trap_raz_wi },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  trap_raz_wi },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  trap_raz_wi },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  trap_raz_wi },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  trap_raz_wi },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
		u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
		u32 el3 = !!((pfr >> 12) & 0xf);

		*vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
					  (((dfr >> 12) & 0xf) << 24) |
					  (((dfr >> 28) & 0xf) << 20) |
					  (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

static bool trap_debug32(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		*vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32,		\
	  NULL, (cp14_DBGBVR0 + (n) * 2) },				\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32,		\
	  NULL, (cp14_DBGBCR0 + (n) * 2) },				\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32,		\
	  NULL, (cp14_DBGWVR0 + (n) * 2) },				\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32,		\
	  NULL, (cp14_DBGWCR0 + (n) * 2) }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32,		\
	  NULL, cp14_DBGBXVR0 + n * 2 }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}
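
/*
 * The per-target tables themselves are registered from outside this
 * file: a target backend (for example sys_regs_generic_v8.c in the
 * current tree) is expected to call kvm_register_target_sys_reg_table()
 * at init time for each target it handles. The caller named here is
 * illustrative only; the registration interface above is the contract.
 */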

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct sys_reg_desc *r = &table[i];

		if (params->Op0 != r->Op0)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 * calls the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      const struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		}

		/* Handled */
		return 0;
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp;

	switch (hsr_ec) {
	case ESR_EL2_EC_CP15_32:
	case ESR_EL2_EC_CP15_64:
		cp = 15;
		break;
	case ESR_EL2_EC_CP14_MR:
	case ESR_EL2_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON((cp = -1));
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt2 = (hsr >> 10) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Massive hack here. Store Rt2 in the top 32bits so we only
	 * have one register to deal with. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val &= 0xffffffff;
		val |= *vcpu_reg(vcpu, Rt2) << 32;
		*vcpu_reg(vcpu, params.Rt) = val;
	}

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		goto out;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		goto out;

	unhandled_cp_access(vcpu, &params);

out:
	/* Do the opposite hack for the read side */
	if (!params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val >>= 32;
		*vcpu_reg(vcpu, Rt2) = val;
	}

	return 1;
}

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		return 1;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		return 1;

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.Rt = (esr >> 5) & 0x1f;
	params.is_write = !(esr & 1);

	return emulate_sys_reg(vcpu, &params);
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
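
/*
 * Each valid CSSELR value is exposed to userspace as one 32-bit demux
 * register, with an id of the form
 *   KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 *   KVM_REG_ARM_DEMUX_ID_CCSIDR | <csselr>
 * (see write_demux_regids() below). demux_c15_get() above returns the
 * matching CCSIDR, and demux_c15_set() below only accepts the value
 * the host already reports, keeping the cache geometry invariant.
 */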

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}