// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_ICOUNTER(VM, num_2M_pages),
	STATS_DESC_ICOUNTER(VM, num_1G_pages)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, sum_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, light_exits),
	STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, syscall_exits),
	STATS_DESC_COUNTER(VCPU, isi_exits),
	STATS_DESC_COUNTER(VCPU, dsi_exits),
	STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
	STATS_DESC_COUNTER(VCPU, dec_exits),
	STATS_DESC_COUNTER(VCPU, ext_intr_exits),
	STATS_DESC_COUNTER(VCPU, halt_successful_wait),
	STATS_DESC_COUNTER(VCPU, dbell_exits),
	STATS_DESC_COUNTER(VCPU, gdbell_exits),
	STATS_DESC_COUNTER(VCPU, ld),
	STATS_DESC_COUNTER(VCPU, st),
	STATS_DESC_COUNTER(VCPU, pthru_all),
	STATS_DESC_COUNTER(VCPU, pthru_host),
	STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
	       vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
	       vcpu->arch.regs.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load up the guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so that the host knows we're
 * holding the FPU, and the host can then save the guest vcpu FP state
 * on our behalf if another thread needs to use the FPU.
 * This simulates an FP unavailable fault.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save the guest vcpu FP state into the thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}
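
/*
 * Illustrative sequence for the lazy FP handoff above (editorial example
 * following from the code, not an additional code path): when the guest
 * first needs the FPU, kvmppc_load_guest_fp() loads vcpu->arch.fp into
 * the real FPU and points current->thread.fp_save_area at it.  If the
 * host later needs the FPU for another thread, giveup_fpu() saves the
 * live registers back through that pointer, so the guest state survives
 * even though KVM never explicitly saved it at that point.
 */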

/*
 * Simulate an AltiVec unavailable fault to load the guest state
 * from the thread into the AltiVec unit.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			disable_kernel_altivec();
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save the guest vcpu AltiVec state into the thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}
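
/*
 * Note on the shadow MSR (summary of the scheme used by the sync helpers
 * above; the exact layout lives in the subarch code): without hardware
 * guest state (no CONFIG_KVM_BOOKE_HV), the guest never runs with its
 * own MSR in the real register.  Instead vcpu->arch.shadow_msr holds
 * what is actually loaded, and bits such as MSR_FP, MSR_SPE and MSR_DE
 * are copied in from the guest-visible vcpu->arch.shared->msr whenever
 * the guest changes them; e.g. a guest mtmsr setting MSR_DE propagates
 * into shadow_msr via kvmppc_vcpu_sync_debug(), unless userspace
 * debugging overrides it.
 */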

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
				 ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
				    ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}

#ifdef CONFIG_ALTIVEC
void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
}
#endif

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
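
	/*
	 * Illustrative example of the check above: if the guest stored
	 * 0x1000 into shared->critical and currently runs in supervisor
	 * mode with r1 == 0x1000, crit is true and the maskable
	 * priorities below refuse delivery until the guest leaves the
	 * critical section (r1 changes or shared->critical is reset).
	 */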

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		fallthrough;
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		fallthrough;
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		fallthrough;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.regs.nip,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.regs.nip = vcpu->arch.ivpr |
				      vcpu->arch.ivor[priority];
		if (update_esr)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
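
/*
 * Worked example of a successful delivery (illustrative): for a DTLB
 * miss (a noncritical priority that is always allowed), the old
 * NIP/MSR are saved into SRR0/SRR1, DEAR and ESR receive the queued
 * fault values, the new NIP becomes
 * IVPR | IVOR[BOOKE_IRQPRIO_DTLB_MISS], and the guest MSR is reduced
 * to at most CE|ME|DE, mirroring what Book E hardware does on
 * interrupt entry.
 */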

/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}
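
/*
 * Numeric example for the computation above (illustrative values):
 * suppose the period selects wdt_tb = 0x10000 and get_tb() returns
 * 0x12345.  The watched bit is already 1, so wdt_ticks starts at
 * 0x10000 (it must clear and come back), plus 0x10000 - 0x2345 =
 * 0xdcbb ticks until the lower bits wrap: 0x1dcbb timebase ticks
 * until the next 0->1 toggle, then rounded up to whole jiffies.
 *
 * The emulated watchdog is a two-stage state machine, as on the real
 * hardware: the first expiry sets TSR[ENW], the second sets TSR[WIS]
 * (optionally raising a guest watchdog interrupt), and an expiry with
 * both bits already set is "final" and may exit to userspace if
 * TCR[WRC] requests an action; see kvmppc_watchdog_func() below.
 */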

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}
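
/*
 * Example of the scan above (illustrative): if both the decrementer
 * and an external interrupt are pending, __ffs() tries the
 * lowest-numbered pending priority first; if that one is currently
 * masked, find_next_bit() moves on, and the loop stops at the first
 * priority that kvmppc_booke_irqprio_deliver() actually injects, so
 * at most one interrupt is delivered per call.
 */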

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (kvm_request_pending(vcpu)) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_halt(vcpu);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save userspace AltiVec state in stack */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;

	vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(vcpu);

	/* No need for guest_exit.  It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

static int emulation_exit(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
		vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}
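
/*
 * Encoding example for the EMULATE_FAIL path above (illustrative):
 * ~0ULL << 32 sets the upper 32 bits of hardware_exit_reason to all
 * ones as a "failed emulation" marker, and OR-ing in last_inst puts
 * the raw opcode in the lower 32 bits, so a failing instruction
 * 0x7c0004ac would surface to userspace as 0xffffffff7c0004ac.
 */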

static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	if (vcpu->guest_debug == 0) {
		/*
		 * Debug resources belong to the guest.
		 * Imprecise debug events are not injected.
		 */
		if (dbsr & DBSR_IDE) {
			dbsr &= ~DBSR_IDE;
			if (!dbsr)
				return RESUME_GUEST;
		}

		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
		    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
			kvmppc_core_queue_debug(vcpu);

		/* Inject a program interrupt if trap debug is not allowed */
		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
			kvmppc_core_queue_program(vcpu, ESR_PTR);

		return RESUME_GUEST;
	}

	/*
	 * Debug resources are owned by userspace.
	 * Clear the guest dbsr (vcpu->arch.dbsr).
	 */
	vcpu->arch.dbsr = 0;
	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.regs.nip;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}
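
/*
 * Note on the asm above: "bl 1f; 1: mflr %0" branches to the very next
 * instruction, which deposits that instruction's address in LR;
 * reading LR back is the standard PowerPC idiom for sampling the
 * current instruction pointer, since there is no direct "move from PC"
 * instruction.  The real LR was captured first, so the synthesized
 * pt_regs still records the caller's link register.
 */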

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (but not exactly) as it would be called from the low level handlers
 * (such as those in arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		kvmppc_fill_pt_regs(&regs);
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
				   enum emulation_result emulated, u32 last_inst)
{
	switch (emulated) {
	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		pr_debug("%s: load instruction from guest address %lx failed\n",
			 __func__, vcpu->arch.regs.nip);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
		vcpu->run->hw.hardware_exit_reason |= last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	struct kvm_run *run = vcpu->run;
	int r = RESUME_HOST;
	int s;
	int idx;
	u32 last_inst = KVM_INST_FETCH_FAILED;
	enum emulation_result emulated = EMULATE_DONE;

	/* Fix irq state (pairs with kvmppc_fix_ee_before_entry()) */
	kvmppc_fix_ee_after_exit();

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	/*
	 * get last instruction before being preempted
	 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
	 */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_DATA_STORAGE:
	case BOOKE_INTERRUPT_DTLB_MISS:
	case BOOKE_INTERRUPT_HV_PRIV:
		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	case BOOKE_INTERRUPT_PROGRAM:
		/* SW breakpoints arrive as illegal instructions on HV */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	default:
		break;
	}

	trace_kvm_exit(exit_nr, vcpu);

	context_tracking_guest_exit();
	if (!vtime_accounting_enabled_this_cpu()) {
		local_irq_enable();
		/*
		 * Service IRQs here before vtime_account_guest_exit() so any
		 * ticks that occurred while running the guest are accounted to
		 * the guest. If vtime accounting is enabled, accounting uses
		 * TB rather than ticks, so it can be done without enabling
		 * interrupts here, which has the problem that it accounts
		 * interrupt processing overhead to the host.
		 */
		local_irq_disable();
	}
	vtime_account_guest_exit();

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	if (emulated != EMULATE_DONE) {
		r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
		goto out;
	}

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
		    (last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
			/*
			 * We are here because of an SW breakpoint instr,
			 * so lets return to host to handle.
			 */
			r = kvmppc_handle_debug(vcpu);
			run->exit_reason = KVM_EXIT_DEBUG;
			kvmppc_account_exit(vcpu, DEBUG_EXITS);
			break;
		}

		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#elif defined(CONFIG_SPE_POSSIBLE)
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.regs.nip);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif /* CONFIG_SPE_POSSIBLE */

	/*
	 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
	 * see kvmppc_core_check_processor_compat().
	 */
#ifdef CONFIG_ALTIVEC
	case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM.
			 */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.regs.nip;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

out:
	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
			kvmppc_load_guest_fp(vcpu);
			kvmppc_load_guest_altivec(vcpu);
		}
	}

	return r;
}
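
/*
 * Resume-code example for the out: path above (illustrative):
 * kvmppc_prepare_to_enter() returning s <= 0 means "go back to
 * userspace", so the (zero or negative) code is shifted into the upper
 * bits as documented at kvmppc_handle_exit(), RESUME_HOST is set, and
 * only the NV flag ("non-volatile registers need reloading") survives
 * from the old value of r.  A positive s re-enters the guest with
 * interrupts hard-disabled and guest FP/AltiVec state reloaded.
 */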

static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);

	/*
	 * Clear DBSR.MRR to avoid a guest debug interrupt, as this
	 * event is of host interest.
	 */
	mtspr(SPRN_DBSR, DBSR_MRR);
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	regs->pc = vcpu->arch.regs.nip;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.regs.ctr;
	regs->lr = vcpu->arch.regs.link;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	vcpu->arch.regs.nip = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.regs.ctr = regs->ctr;
	vcpu->arch.regs.link = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_pid(vcpu, regs->pid);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	vcpu_put(vcpu);
	return 0;
}

static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = kvmppc_get_esr(vcpu);
	sregs->u.e.dear = kvmppc_get_dar(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	kvmppc_set_esr(vcpu, sregs->u.e.esr);
	kvmppc_set_dar(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
	return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);

	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret = -EINVAL;

	vcpu_load(vcpu);
	if (vcpu->arch.pvr != sregs->pvr)
		goto out;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		goto out;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		goto out;

	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);

out:
	vcpu_put(vcpu);
	return ret;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
		break;
	case KVM_REG_PPC_IAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
		break;
	case KVM_REG_PPC_IAC4:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
		break;
	case KVM_REG_PPC_DAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
		break;
	case KVM_REG_PPC_EPR: {
		u32 epr = kvmppc_get_epr(vcpu);
		*val = get_reg_val(id, epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		*val = get_reg_val(id, vcpu->arch.epcr);
		break;
#endif
	case KVM_REG_PPC_TCR:
		*val = get_reg_val(id, vcpu->arch.tcr);
		break;
	case KVM_REG_PPC_TSR:
		*val = get_reg_val(id, vcpu->arch.tsr);
		break;
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
		break;
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(id, *val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(id, *val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(id, *val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(id, *val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(id, *val);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	vcpu_load(vcpu);
	r = kvmppc_core_vcpu_translate(vcpu, tr);
	vcpu_put(vcpu);
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -EOPNOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      const struct kvm_memory_slot *old,
				      struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}
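
/*
 * Illustrative TSR/TCR interplay (derived from the helpers above): a
 * guest that enables decrementer interrupts sets TCR[DIE]; when the
 * decrementer fires, TSR[DIS] is set via kvmppc_set_tsr_bits() and
 * update_timer_ints() queues BOOKE_IRQPRIO_DECREMENTER.  The guest
 * acknowledges by clearing TSR[DIS] (e.g. userspace writes
 * KVM_REG_PPC_CLEAR_TSR), which dequeues the interrupt again.
 */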

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}
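
/*
 * Example of the resulting register state (illustrative): a single
 * read/write watchpoint at 0x1000 in slot 0 leaves
 * DBCR0 |= DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_IDM and DAC1 = 0x1000;
 * DBCR0_IDM is what arms internal debug mode, and is likewise set by
 * the breakpoint path above.
 */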
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;
	int ret = 0;

	vcpu_load(vcpu);

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		goto out;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.dbg_reg.dbcr0 = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints */
	dbg_reg = &(vcpu->arch.dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1
	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
	 * is set.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		goto out;

	ret = -EINVAL;
	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			goto out;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				goto out;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				goto out;
		}
	}

	ret = 0;
out:
	vcpu_put(vcpu);
	return ret;
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

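/*
 * Create the vcpu through the subarch hook, then establish the initial
 * register state (PC = 0, MSR = 0, R1 just below 16MB) along with
 * eye-catching IVPR/IVOR placeholders so a premature guest interrupt
 * is easy to spot.
 */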
int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
	if (r)
		return r;

	/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
	vcpu->arch.regs.nip = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	if (r)
		vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR holds
	 * only the upper 16 bits of the vector base, so the handlers must sit
	 * in a 64KB-aligned region; a 64KB allocation provides that. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		/* Track the index of the highest-placed handler so the icache
		 * flush below covers everything we copied. */
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}