/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <sheng.yang@intel.com>
 *   Based on QEMU and Xen.
 */

#include <linux/kvm_host.h>

#include "irq.h"
#include "i8254.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define RW_STATE_LSB    1
#define RW_STATE_MSB    2
#define RW_STATE_WORD0  3
#define RW_STATE_WORD1  4

/* Compute (a*b)/c with a 96 bit intermediate result. */
static u64 muldiv64(u64 a, u32 b, u32 c)
{
        union {
                u64 ll;
                struct {
                        u32 low, high;
                } l;
        } u, res;
        u64 rl, rh;

        u.ll = a;
        rl = (u64)u.l.low * (u64)b;
        rh = (u64)u.l.high * (u64)b;
        rh += (rl >> 32);
        res.l.high = div64_u64(rh, c);
        res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
        return res.ll;
}
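/*
 * Worked example (for illustration): the maximum reload value of
 * 0x10000 maps to muldiv64(0x10000, NSEC_PER_SEC, KVM_PIT_FREQ)
 * = 65536 * 10^9 / 1193182 = 54925401 ns, the classic 18.2 Hz BIOS
 * tick.  The 96-bit intermediate matters in the other direction:
 * elapsed_ns * KVM_PIT_FREQ overflows a plain u64 after roughly
 * four hours, which a long-lived channel can easily exceed.
 */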
static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
{
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        switch (c->mode) {
        default:
        case 0:
        case 4:
                /* XXX: just disable/enable counting */
                break;
        case 1:
        case 2:
        case 3:
        case 5:
                /* Restart counting on rising edge. */
                if (c->gate < val)
                        c->count_load_time = ktime_get();
                break;
        }

        c->gate = val;
}

static int pit_get_gate(struct kvm *kvm, int channel)
{
        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        return kvm->arch.vpit->pit_state.channels[channel].gate;
}

static s64 __kpit_elapsed(struct kvm *kvm)
{
        s64 elapsed;
        ktime_t remaining;
        struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

        /*
         * The Counter does not stop when it reaches zero. In
         * Modes 0, 1, 4, and 5 the Counter "wraps around" to
         * the highest count, either FFFF hex for binary counting
         * or 9999 for BCD counting, and continues counting.
         * Modes 2 and 3 are periodic; the Counter reloads
         * itself with the initial count and continues counting
         * from there.
         */
        remaining = hrtimer_expires_remaining(&ps->pit_timer.timer);
        elapsed = ps->pit_timer.period - ktime_to_ns(remaining);
        elapsed = mod_64(elapsed, ps->pit_timer.period);

        return elapsed;
}

static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
                        int channel)
{
        if (channel == 0)
                return __kpit_elapsed(kvm);

        return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}

static int pit_get_count(struct kvm *kvm, int channel)
{
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];
        s64 d, t;
        int counter;

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        t = kpit_elapsed(kvm, c, channel);
        d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

        switch (c->mode) {
        case 0:
        case 1:
        case 4:
        case 5:
                counter = (c->count - d) & 0xffff;
                break;
        case 3:
                /* XXX: may be incorrect for odd counts */
                counter = c->count - (mod_64((2 * d), c->count));
                break;
        default:
                counter = c->count - mod_64(d, c->count);
                break;
        }
        return counter;
}

static int pit_get_out(struct kvm *kvm, int channel)
{
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];
        s64 d, t;
        int out;

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        t = kpit_elapsed(kvm, c, channel);
        d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

        switch (c->mode) {
        default:
        case 0:
                out = (d >= c->count);
                break;
        case 1:
                out = (d < c->count);
                break;
        case 2:
                out = ((mod_64(d, c->count) == 0) && (d != 0));
                break;
        case 3:
                out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
                break;
        case 4:
        case 5:
                out = (d == c->count);
                break;
        }

        return out;
}
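/*
 * Counter/status latching: a running count can change between the two
 * 8-bit reads the guest needs for a 16-bit value, so the guest first
 * latches a snapshot and then reads the frozen copy.  The latch is
 * one-shot: it is released once fully read, and further latch commands
 * are ignored until then.
 */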
static void pit_latch_count(struct kvm *kvm, int channel)
{
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        if (!c->count_latched) {
                c->latched_count = pit_get_count(kvm, channel);
                c->count_latched = c->rw_mode;
        }
}

static void pit_latch_status(struct kvm *kvm, int channel)
{
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        if (!c->status_latched) {
                /* TODO: Return NULL COUNT (bit 6). */
                c->status = ((pit_get_out(kvm, channel) << 7) |
                             (c->rw_mode << 4) |
                             (c->mode << 1) |
                             c->bcd);
                c->status_latched = 1;
        }
}

int pit_has_pending_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_pit *pit = vcpu->kvm->arch.vpit;

        if (pit && vcpu->vcpu_id == 0 && pit->pit_state.irq_ack)
                return atomic_read(&pit->pit_state.pit_timer.pending);
        return 0;
}

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
                                                 irq_ack_notifier);

        spin_lock(&ps->inject_lock);
        if (atomic_dec_return(&ps->pit_timer.pending) < 0)
                atomic_inc(&ps->pit_timer.pending);
        ps->irq_ack = 1;
        spin_unlock(&ps->inject_lock);
}

void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_pit *pit = vcpu->kvm->arch.vpit;
        struct hrtimer *timer;

        if (vcpu->vcpu_id != 0 || !pit)
                return;

        timer = &pit->pit_state.pit_timer.timer;
        if (hrtimer_cancel(timer))
                hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

static void destroy_pit_timer(struct kvm_timer *pt)
{
        pr_debug("pit: destroying timer\n");
        hrtimer_cancel(&pt->timer);
}

static bool kpit_is_periodic(struct kvm_timer *ktimer)
{
        struct kvm_kpit_state *ps = container_of(ktimer, struct kvm_kpit_state,
                                                 pit_timer);

        return ps->is_periodic;
}

static struct kvm_timer_ops kpit_ops = {
        .is_periodic = kpit_is_periodic,
};

static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
{
        struct kvm_timer *pt = &ps->pit_timer;
        s64 interval;

        interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);

        pr_debug("pit: create pit timer, interval is %lld nsec\n", interval);

        /* TODO: the new value should only take effect once the counter is retriggered. */
        hrtimer_cancel(&pt->timer);
        pt->period = interval;
        ps->is_periodic = is_period;

        pt->timer.function = kvm_timer_fn;
        pt->t_ops = &kpit_ops;
        pt->kvm = ps->pit->kvm;
        pt->vcpu_id = 0;

        atomic_set(&pt->pending, 0);
        ps->irq_ack = 1;

        hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
                      HRTIMER_MODE_ABS);
}
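/*
 * Only channel 0 is wired to the interrupt controller (IRQ 0), so only
 * it is backed by an hrtimer; channels 1 and 2 just record their load
 * time, from which reads reconstruct the current count.
 */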
static void pit_load_count(struct kvm *kvm, int channel, u32 val)
{
        struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

        WARN_ON(!mutex_is_locked(&ps->lock));

        pr_debug("pit: load_count val is %u, channel is %d\n", val, channel);

        /*
         * The largest possible initial count is 0; this is equivalent
         * to 2^16 for binary counting and 10^4 for BCD counting.
         */
        if (val == 0)
                val = 0x10000;

        ps->channels[channel].count = val;

        if (channel != 0) {
                ps->channels[channel].count_load_time = ktime_get();
                return;
        }

        /*
         * Two kinds of timer: modes 0, 1 and 4 are one-shot, modes 2
         * and 3 are periodic; any other mode tears the timer down.
         */
        switch (ps->channels[0].mode) {
        case 0:
        case 1:
        /* FIXME: enhance mode 4 precision */
        case 4:
                create_pit_timer(ps, val, 0);
                break;
        case 2:
        case 3:
                create_pit_timer(ps, val, 1);
                break;
        default:
                destroy_pit_timer(&ps->pit_timer);
        }
}

void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val)
{
        mutex_lock(&kvm->arch.vpit->pit_state.lock);
        pit_load_count(kvm, channel, val);
        mutex_unlock(&kvm->arch.vpit->pit_state.lock);
}
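/*
 * A write to port 0x43 carries the 8254 control word:
 *
 *   bits 7-6  SC   counter select (0-2), or 3 for Read-Back (8254 only)
 *   bits 5-4  RW   access mode: 0 latches the count, 1 = LSB only,
 *                  2 = MSB only, 3 = LSB then MSB
 *   bits 3-1  M    counting mode (0-5)
 *   bit  0    BCD  binary (0) or BCD (1) counting
 *
 * In a Read-Back command, bit 5 clear latches the count, bit 4 clear
 * latches the status, and bits 3-1 form a per-counter select mask;
 * that is the decoding implemented below.
 */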
static void pit_ioport_write(struct kvm_io_device *this,
                             gpa_t addr, int len, const void *data)
{
        struct kvm_pit *pit = (struct kvm_pit *)this->private;
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        int channel, access;
        struct kvm_kpit_channel_state *s;
        u32 val = *(u32 *) data;

        val &= 0xff;
        addr &= KVM_PIT_CHANNEL_MASK;

        mutex_lock(&pit_state->lock);

        if (val != 0)
                pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n",
                         (unsigned int)addr, len, val);

        if (addr == 3) {
                channel = val >> 6;
                if (channel == 3) {
                        /* Read-Back Command. */
                        for (channel = 0; channel < 3; channel++) {
                                s = &pit_state->channels[channel];
                                if (val & (2 << channel)) {
                                        if (!(val & 0x20))
                                                pit_latch_count(kvm, channel);
                                        if (!(val & 0x10))
                                                pit_latch_status(kvm, channel);
                                }
                        }
                } else {
                        /* Select Counter <channel>. */
                        s = &pit_state->channels[channel];
                        access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
                        if (access == 0) {
                                pit_latch_count(kvm, channel);
                        } else {
                                s->rw_mode = access;
                                s->read_state = access;
                                s->write_state = access;
                                s->mode = (val >> 1) & 7;
                                if (s->mode > 5)
                                        s->mode -= 4;
                                s->bcd = val & 1;
                        }
                }
        } else {
                /* Write Count. */
                s = &pit_state->channels[addr];
                switch (s->write_state) {
                default:
                case RW_STATE_LSB:
                        pit_load_count(kvm, addr, val);
                        break;
                case RW_STATE_MSB:
                        pit_load_count(kvm, addr, val << 8);
                        break;
                case RW_STATE_WORD0:
                        s->write_latch = val;
                        s->write_state = RW_STATE_WORD1;
                        break;
                case RW_STATE_WORD1:
                        pit_load_count(kvm, addr, s->write_latch | (val << 8));
                        s->write_state = RW_STATE_WORD0;
                        break;
                }
        }

        mutex_unlock(&pit_state->lock);
}

static void pit_ioport_read(struct kvm_io_device *this,
                            gpa_t addr, int len, void *data)
{
        struct kvm_pit *pit = (struct kvm_pit *)this->private;
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        int ret, count;
        struct kvm_kpit_channel_state *s;

        addr &= KVM_PIT_CHANNEL_MASK;
        s = &pit_state->channels[addr];

        mutex_lock(&pit_state->lock);

        if (s->status_latched) {
                s->status_latched = 0;
                ret = s->status;
        } else if (s->count_latched) {
                switch (s->count_latched) {
                default:
                case RW_STATE_LSB:
                        ret = s->latched_count & 0xff;
                        s->count_latched = 0;
                        break;
                case RW_STATE_MSB:
                        ret = s->latched_count >> 8;
                        s->count_latched = 0;
                        break;
                case RW_STATE_WORD0:
                        ret = s->latched_count & 0xff;
                        s->count_latched = RW_STATE_MSB;
                        break;
                }
        } else {
                switch (s->read_state) {
                default:
                case RW_STATE_LSB:
                        count = pit_get_count(kvm, addr);
                        ret = count & 0xff;
                        break;
                case RW_STATE_MSB:
                        count = pit_get_count(kvm, addr);
                        ret = (count >> 8) & 0xff;
                        break;
                case RW_STATE_WORD0:
                        count = pit_get_count(kvm, addr);
                        ret = count & 0xff;
                        s->read_state = RW_STATE_WORD1;
                        break;
                case RW_STATE_WORD1:
                        count = pit_get_count(kvm, addr);
                        ret = (count >> 8) & 0xff;
                        s->read_state = RW_STATE_WORD0;
                        break;
                }
        }

        if (len > sizeof(ret))
                len = sizeof(ret);
        memcpy(data, (char *)&ret, len);

        mutex_unlock(&pit_state->lock);
}

static int pit_in_range(struct kvm_io_device *this, gpa_t addr,
                        int len, int is_write)
{
        return ((addr >= KVM_PIT_BASE_ADDRESS) &&
                (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}

static void speaker_ioport_write(struct kvm_io_device *this,
                                 gpa_t addr, int len, const void *data)
{
        struct kvm_pit *pit = (struct kvm_pit *)this->private;
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        u32 val = *(u32 *) data;

        mutex_lock(&pit_state->lock);
        pit_state->speaker_data_on = (val >> 1) & 1;
        pit_set_gate(kvm, 2, val & 1);
        mutex_unlock(&pit_state->lock);
}
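/*
 * Port 0x61 is the PC/AT "port B"/speaker control register: bit 0
 * gates timer 2, bit 1 enables speaker data, bit 4 mirrors the DRAM
 * refresh-clock toggle and bit 5 reflects the timer 2 OUT pin.
 */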
static void speaker_ioport_read(struct kvm_io_device *this,
                                gpa_t addr, int len, void *data)
{
        struct kvm_pit *pit = (struct kvm_pit *)this->private;
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        unsigned int refresh_clock;
        int ret;

        /*
         * The refresh clock toggles roughly every 15 us; bit 14 of the
         * nanosecond clock (2^14 ns ~= 16.4 us) is a close approximation.
         */
        refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

        mutex_lock(&pit_state->lock);
        ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
               (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
        if (len > sizeof(ret))
                len = sizeof(ret);
        memcpy(data, (char *)&ret, len);
        mutex_unlock(&pit_state->lock);
}

static int speaker_in_range(struct kvm_io_device *this, gpa_t addr,
                            int len, int is_write)
{
        return (addr == KVM_SPEAKER_BASE_ADDRESS);
}

void kvm_pit_reset(struct kvm_pit *pit)
{
        int i;
        struct kvm_kpit_channel_state *c;

        mutex_lock(&pit->pit_state.lock);
        for (i = 0; i < 3; i++) {
                c = &pit->pit_state.channels[i];
                c->mode = 0xff;
                c->gate = (i != 2);
                pit_load_count(pit->kvm, i, 0);
        }
        mutex_unlock(&pit->pit_state.lock);

        atomic_set(&pit->pit_state.pit_timer.pending, 0);
        pit->pit_state.irq_ack = 1;
}

static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
{
        struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

        if (!mask) {
                atomic_set(&pit->pit_state.pit_timer.pending, 0);
                pit->pit_state.irq_ack = 1;
        }
}

struct kvm_pit *kvm_create_pit(struct kvm *kvm)
{
        struct kvm_pit *pit;
        struct kvm_kpit_state *pit_state;

        pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
        if (!pit)
                return NULL;

        pit->irq_source_id = kvm_request_irq_source_id(kvm);
        if (pit->irq_source_id < 0) {
                kfree(pit);
                return NULL;
        }

        mutex_init(&pit->pit_state.lock);
        mutex_lock(&pit->pit_state.lock);
        spin_lock_init(&pit->pit_state.inject_lock);

        /* Initialize PIO device */
        pit->dev.read = pit_ioport_read;
        pit->dev.write = pit_ioport_write;
        pit->dev.in_range = pit_in_range;
        pit->dev.private = pit;
        kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);

        pit->speaker_dev.read = speaker_ioport_read;
        pit->speaker_dev.write = speaker_ioport_write;
        pit->speaker_dev.in_range = speaker_in_range;
        pit->speaker_dev.private = pit;
        kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);

        kvm->arch.vpit = pit;
        pit->kvm = kvm;

        pit_state = &pit->pit_state;
        pit_state->pit = pit;
        hrtimer_init(&pit_state->pit_timer.timer,
                     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        pit_state->irq_ack_notifier.gsi = 0;
        pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
        kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
        pit_state->pit_timer.reinject = true;
        mutex_unlock(&pit->pit_state.lock);

        kvm_pit_reset(pit);

        pit->mask_notifier.func = pit_mask_notifier;
        kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);

        return pit;
}

void kvm_free_pit(struct kvm *kvm)
{
        struct hrtimer *timer;

        if (kvm->arch.vpit) {
                kvm_unregister_irq_mask_notifier(kvm, 0,
                                                 &kvm->arch.vpit->mask_notifier);
                mutex_lock(&kvm->arch.vpit->pit_state.lock);
                timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
                hrtimer_cancel(timer);
                kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
                mutex_unlock(&kvm->arch.vpit->pit_state.lock);
                kfree(kvm->arch.vpit);
        }
}
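/*
 * Injection is edge-style: raise IRQ 0 and drop it again at once, so
 * the PIC/IOAPIC sees one rising edge per tick.  Together with the ack
 * notifier, the pending counter lets missed ticks catch up: the next
 * tick is injected only after the guest has acked the previous one.
 */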
static void __inject_pit_timer_intr(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int i;

        mutex_lock(&kvm->lock);
        kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
        kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
        mutex_unlock(&kvm->lock);

        /*
         * Provides NMI watchdog support via Virtual Wire mode.
         * The route is: PIT -> PIC -> LVT0 in NMI mode.
         *
         * Note: Our Virtual Wire implementation is simplified, only
         * propagating PIT interrupts to all VCPUs when they have set
         * LVT0 to NMI delivery. Other PIC interrupts are just sent to
         * VCPU0, and only if its LVT0 is in EXTINT mode.
         */
        if (kvm->arch.vapics_in_nmi_mode > 0)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = kvm->vcpus[i];
                        if (vcpu)
                                kvm_apic_nmi_wd_deliver(vcpu);
                }
}

void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_pit *pit = vcpu->kvm->arch.vpit;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_kpit_state *ps;

        if (vcpu && pit) {
                int inject = 0;

                ps = &pit->pit_state;

                /*
                 * Try to inject pending interrupts when
                 * the last one has been acked.
                 */
                spin_lock(&ps->inject_lock);
                if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
                        ps->irq_ack = 0;
                        inject = 1;
                }
                spin_unlock(&ps->inject_lock);
                if (inject)
                        __inject_pit_timer_intr(kvm);
        }
}