booke.c: diff from 7c973a2ebb8fb9c8ee2ae9647f9ad7b0ad58a3e6 to f61c94bb99ca4253ac5dd57750e1af209a4beb7a
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
--- 195 unchanged lines hidden ---

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}
static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GSRR0, srr0);
        mtspr(SPRN_GSRR1, srr1);
#else
        vcpu->arch.shared->srr0 = srr0;
        vcpu->arch.shared->srr1 = srr1;
--- 103 unchanged lines hidden ---
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
        case BOOKE_IRQPRIO_AP_UNAVAIL:
        case BOOKE_IRQPRIO_ALIGNMENT:
                allowed = 1;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_WATCHDOG:
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_DBELL_CRIT:
                allowed = vcpu->arch.shared->msr & MSR_CE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
--- 63 unchanged lines hidden ---
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

        return allowed;
}
/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
        u64 tb, wdt_tb, wdt_ticks = 0;
        u64 nr_jiffies = 0;
        u32 period = TCR_GET_WP(vcpu->arch.tcr);

        wdt_tb = 1ULL << (63 - period);
        tb = get_tb();
        /*
         * The watchdog timeout will happen when the TB bit corresponding
         * to the watchdog period toggles from 0 to 1.
         */
        if (tb & wdt_tb)
                wdt_ticks = wdt_tb;

        wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

        /* Convert timebase ticks to jiffies */
        nr_jiffies = wdt_ticks;

        if (do_div(nr_jiffies, tb_ticks_per_jiffy))
                nr_jiffies++;

        return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
        unsigned long nr_jiffies;
        unsigned long flags;

        /*
         * If TSR_ENW and TSR_WIS are not set then there is no need to exit
         * to userspace, so clear the KVM_REQ_WATCHDOG request.
         */
        if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
                clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

        spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
        nr_jiffies = watchdog_next_timeout(vcpu);
        /*
         * If the number of jiffies until the watchdog timeout is
         * >= NEXT_TIMER_MAX_DELTA, then do not run the watchdog timer,
         * as this can break the timer APIs.
         */
        if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
                mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
        else
                del_timer(&vcpu->arch.wdt_timer);
        spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
        u32 tsr, new_tsr;
        int final;

        do {
                new_tsr = tsr = vcpu->arch.tsr;
                final = 0;

                /* Time out event */
                if (tsr & TSR_ENW) {
                        if (tsr & TSR_WIS)
                                final = 1;
                        else
                                new_tsr = tsr | TSR_WIS;
                } else {
                        new_tsr = tsr | TSR_ENW;
                }
        } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

        if (new_tsr & TSR_WIS) {
                smp_wmb();
                kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
                kvm_vcpu_kick(vcpu);
        }

        /*
         * If this is the final watchdog expiry and some action is required,
         * then exit to userspace.
         */
        if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
            vcpu->arch.watchdog_enabled) {
                smp_wmb();
                kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
                kvm_vcpu_kick(vcpu);
        }

        /*
         * Stop running the watchdog timer after the final expiration to
         * prevent the host from being flooded with timers if the
         * guest sets a short period.
         * Timers will resume the next time TSR/TCR is updated.
         */
        if (!final)
                arm_next_watchdog(vcpu);
}
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
                kvmppc_core_queue_dec(vcpu);
        else
                kvmppc_core_dequeue_dec(vcpu);

        if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
                kvmppc_core_queue_watchdog(vcpu);
        else
                kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

        priority = __ffs(*pending);
--- 37 unchanged lines hidden ---

        if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
                update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_core_flush_tlb(vcpu);
#endif

        if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
                vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
                r = 0;
        }

        return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret, s;
#ifdef CONFIG_PPC_FPU
        unsigned int fpscr;
--- 513 unchanged lines hidden ---

        kvmppc_init_timing_stats(vcpu);

        r = kvmppc_core_vcpu_setup(vcpu);
        kvmppc_sanity_check(vcpu);
        return r;
}
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
        /* setup watchdog timer once */
        spin_lock_init(&vcpu->arch.wdt_lock);
        setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
                    (unsigned long)vcpu);

        return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        del_timer_sync(&vcpu->arch.wdt_timer);
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = vcpu->arch.pc;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
--- 79 unchanged lines hidden ---
        kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
                vcpu->arch.dec = sregs->u.e.dec;
                kvmppc_emulate_dec(vcpu);
        }

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
                u32 old_tsr = vcpu->arch.tsr;

                vcpu->arch.tsr = sregs->u.e.tsr;

                if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
                        arm_next_watchdog(vcpu);

                update_timer_ints(vcpu);
        }

        return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
--- 144 unchanged lines hidden ---
void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
        vcpu->arch.tcr = new_tcr;
        arm_next_watchdog(vcpu);
        update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        set_bits(tsr_bits, &vcpu->arch.tsr);
        smp_wmb();
        kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
        kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        clear_bits(tsr_bits, &vcpu->arch.tsr);

        /*
         * We may have stopped the watchdog due to
         * being stuck on final expiration.
         */
        if (tsr_bits & (TSR_ENW | TSR_WIS))
                arm_next_watchdog(vcpu);

        update_timer_ints(vcpu);
}

void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        if (vcpu->arch.tcr & TCR_ARE) {
--- 71 unchanged lines hidden ---
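
As a side note, the timebase arithmetic in the watchdog_next_timeout() hunk above can be sanity-checked outside the kernel. The following is a minimal stand-alone sketch, not part of the patch: the helper name ticks_until_watchdog and the sample period/timebase values are hypothetical, and the jiffies conversion (do_div, tb_ticks_per_jiffy) and the TCR/TSR definitions are deliberately omitted.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-alone sketch: number of timebase ticks until the watchdog bit
 * next toggles from 0 to 1, mirroring the logic of watchdog_next_timeout().
 * "period" plays the role of TCR_GET_WP(vcpu->arch.tcr).
 */
static uint64_t ticks_until_watchdog(uint64_t tb, unsigned int period)
{
        uint64_t wdt_tb = 1ULL << (63 - period);
        uint64_t ticks = 0;

        /* If the watchdog bit is currently 1, it must first fall back to 0. */
        if (tb & wdt_tb)
                ticks = wdt_tb;

        /* Remaining distance to the next 0 -> 1 toggle of that bit. */
        ticks += wdt_tb - (tb & (wdt_tb - 1));

        return ticks;
}

int main(void)
{
        /* period 62 -> the watchdog bit is TB bit 1, which toggles every 2 ticks. */
        printf("%llu\n", (unsigned long long)ticks_until_watchdog(5, 62)); /* prints 1 */
        printf("%llu\n", (unsigned long long)ticks_until_watchdog(6, 62)); /* prints 4 */
        return 0;
}

For tb = 6 (binary 110) the watchdog bit is already set, so the next 0 to 1 transition is a full half-period plus the remainder away (4 ticks, at tb = 10), which matches the two-step logic in the kernel function.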