/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>

#include "trace.h"

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
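/*
 * Illustrative sketch (userspace side, not part of this file): a core
 * register is addressed by OR-ing the architecture, size and coproc
 * fields with the 32bit-array offset described above. To read X2 (a
 * 64bit field of struct kvm_regs) via KVM_GET_ONE_REG, a VMM could do
 * something along these lines, where vcpu_fd is a vcpu file descriptor:
 *
 *	__u64 x2;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(regs.regs[2]),
 *		.addr = (__u64)&x2,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 *
 * KVM_REG_ARM_CORE_REG() expands to the offsetof()-based __u32 index
 * that core_reg_offset_from_id() recovers above.
 */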
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_regs) / sizeof(__u32);
}

/*
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
		+ kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
}
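/*
 * Illustrative sketch (userspace side, not part of this file): the
 * count returned above backs the KVM_GET_REG_LIST ioctl. A VMM would
 * typically probe the required size first, then fetch the indices:
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
 *		- fails with E2BIG, but writes the required count to probe.n
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 *
 * Each returned __u64 is then usable as a kvm_one_reg id with
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG, as dispatched below.
 */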
/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append firmware, timer and
 * system register indices.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
	int ret;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		if (put_user(core_reg | i, uindices))
			return -EFAULT;
		uindices++;
	}

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	/* Register group 16 means we want a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return get_core_reg(vcpu, reg);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
		return kvm_arm_get_fw_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	/* Register group 16 means we set a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return set_core_reg(vcpu, reg);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
		return kvm_arm_set_fw_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	return 0;
}
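/*
 * Illustrative sketch (userspace side, not part of this file): with
 * the KVM_CAP_ARM_INJECT_SERROR_ESR capability, a VMM can make an
 * SError pending through KVM_SET_VCPU_EVENTS, which lands in
 * __kvm_arm_vcpu_set_events() above:
 *
 *	struct kvm_vcpu_events events = { 0 };
 *
 *	events.exception.serror_pending = 1;
 *	events.exception.serror_has_esr = 1;
 *	events.exception.serror_esr = 0x1;
 *		- ISS bits only; anything outside ESR_ELx_ISS_MASK
 *		  is rejected with -EINVAL
 *	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
 */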
int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW |    \
				 KVM_GUESTDBG_SINGLESTEP)
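/*
 * Illustrative sketch (userspace side, not part of this file): a
 * debugger-hosting VMM can single-step the guest by setting the
 * corresponding control flags, which are validated against the mask
 * above:
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * The next KVM_RUN then returns to userspace with exit reason
 * KVM_EXIT_DEBUG after one guest instruction.
 */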
405 */ 406 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 407 struct kvm_guest_debug *dbg) 408 { 409 int ret = 0; 410 411 trace_kvm_set_guest_debug(vcpu, dbg->control); 412 413 if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) { 414 ret = -EINVAL; 415 goto out; 416 } 417 418 if (dbg->control & KVM_GUESTDBG_ENABLE) { 419 vcpu->guest_debug = dbg->control; 420 421 /* Hardware assisted Break and Watch points */ 422 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) { 423 vcpu->arch.external_debug_state = dbg->arch; 424 } 425 426 } else { 427 /* If not enabled clear all flags */ 428 vcpu->guest_debug = 0; 429 } 430 431 out: 432 return ret; 433 } 434 435 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, 436 struct kvm_device_attr *attr) 437 { 438 int ret; 439 440 switch (attr->group) { 441 case KVM_ARM_VCPU_PMU_V3_CTRL: 442 ret = kvm_arm_pmu_v3_set_attr(vcpu, attr); 443 break; 444 case KVM_ARM_VCPU_TIMER_CTRL: 445 ret = kvm_arm_timer_set_attr(vcpu, attr); 446 break; 447 default: 448 ret = -ENXIO; 449 break; 450 } 451 452 return ret; 453 } 454 455 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, 456 struct kvm_device_attr *attr) 457 { 458 int ret; 459 460 switch (attr->group) { 461 case KVM_ARM_VCPU_PMU_V3_CTRL: 462 ret = kvm_arm_pmu_v3_get_attr(vcpu, attr); 463 break; 464 case KVM_ARM_VCPU_TIMER_CTRL: 465 ret = kvm_arm_timer_get_attr(vcpu, attr); 466 break; 467 default: 468 ret = -ENXIO; 469 break; 470 } 471 472 return ret; 473 } 474 475 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, 476 struct kvm_device_attr *attr) 477 { 478 int ret; 479 480 switch (attr->group) { 481 case KVM_ARM_VCPU_PMU_V3_CTRL: 482 ret = kvm_arm_pmu_v3_has_attr(vcpu, attr); 483 break; 484 case KVM_ARM_VCPU_TIMER_CTRL: 485 ret = kvm_arm_timer_has_attr(vcpu, attr); 486 break; 487 default: 488 ret = -ENXIO; 489 break; 490 } 491 492 return ret; 493 } 494