/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Ported from QEMU.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>
#include "trace.h"

#define pr_pic_unimpl(fmt, ...)	\
	pr_err_ratelimited("pic: " fmt, ## __VA_ARGS__)

static void pic_irq_request(struct kvm *kvm, int level);

static void pic_lock(struct kvm_pic *s)
	__acquires(&s->lock)
{
	spin_lock(&s->lock);
}

static void pic_unlock(struct kvm_pic *s)
	__releases(&s->lock)
{
	bool wakeup = s->wakeup_needed;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	s->wakeup_needed = false;

	spin_unlock(&s->lock);

	if (wakeup) {
		kvm_for_each_vcpu(i, vcpu, s->kvm) {
			if (kvm_apic_accept_pic_intr(vcpu)) {
				kvm_make_request(KVM_REQ_EVENT, vcpu);
				kvm_vcpu_kick(vcpu);
				return;
			}
		}
	}
}

static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
	s->isr &= ~(1 << irq);
	if (s != &s->pics_state->pics[0])
		irq += 8;
	/*
	 * We drop the lock while calling the ack notifiers because ack
	 * notifier callbacks for assigned devices call into the PIC
	 * recursively.  Another interrupt may be delivered to the PIC while
	 * the lock is dropped, but that is safe since the PIC state has
	 * already been updated at this stage.
	 */
	pic_unlock(s->pics_state);
	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
	pic_lock(s->pics_state);
}

/*
 * Set the irq level.  If an edge is detected, the corresponding IRR bit
 * is set to 1.
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
	int mask, ret = 1;

	mask = 1 << irq;
	if (s->elcr & mask) {	/* level triggered */
		if (level) {
			ret = !(s->irr & mask);
			s->irr |= mask;
			s->last_irr |= mask;
		} else {
			s->irr &= ~mask;
			s->last_irr &= ~mask;
		}
	} else {		/* edge triggered */
		if (level) {
			if ((s->last_irr & mask) == 0) {
				ret = !(s->irr & mask);
				s->irr |= mask;
			}
			s->last_irr |= mask;
		} else {
			s->last_irr &= ~mask;
		}
	}

	return (s->imr & mask) ? -1 : ret;
}
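/*
 * Return convention of pic_set_irq1(), summarized here for reference
 * (derived from the code above): -1 means the line is masked by the
 * IMR; 0 means the assertion was coalesced because the IRR bit was
 * already set (kvm_pic_set_irq() traces this via ret == 0); 1 covers
 * every other case.  E.g. re-asserting an edge-triggered line whose
 * last_irr bit is still set latches nothing new and still returns 1
 * unless the line is masked.
 */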
/*
 * Return the highest priority found in mask (highest = smallest
 * number).  Return 8 if no irq is pending.
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
	int priority;

	if (mask == 0)
		return 8;
	priority = 0;
	while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
		priority++;
	return priority;
}

/*
 * Return the irq the PIC wants to deliver, or -1 if none.
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
	int mask, cur_priority, priority;

	mask = s->irr & ~s->imr;
	priority = get_priority(s, mask);
	if (priority == 8)
		return -1;
	/*
	 * Compute the current priority.  In special fully nested mode on
	 * the master, the IRQ coming from the slave is not taken into
	 * account for the priority computation.
	 */
	mask = s->isr;
	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
		mask &= ~(1 << 2);
	cur_priority = get_priority(s, mask);
	if (priority < cur_priority)
		/*
		 * Higher priority found: an irq should be generated.
		 */
		return (priority + s->priority_add) & 7;
	else
		return -1;
}

/*
 * Raise an irq to the CPU if necessary.  Must be called every time
 * the active irq may change.
 */
static void pic_update_irq(struct kvm_pic *s)
{
	int irq2, irq;

	irq2 = pic_get_irq(&s->pics[1]);
	if (irq2 >= 0) {
		/*
		 * If the slave PIC requests an irq, signal the master PIC
		 * by pulsing the cascade line (IRQ2).
		 */
		pic_set_irq1(&s->pics[0], 2, 1);
		pic_set_irq1(&s->pics[0], 2, 0);
	}
	irq = pic_get_irq(&s->pics[0]);
	pic_irq_request(s->kvm, irq >= 0);
}

void kvm_pic_update_irq(struct kvm_pic *s)
{
	pic_lock(s);
	pic_update_irq(s);
	pic_unlock(s);
}

int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level)
{
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= PIC_NUM_PINS);

	pic_lock(s);
	irq_level = __kvm_irq_line_state(&s->irq_states[irq],
					 irq_source_id, level);
	ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, irq_level);
	pic_update_irq(s);
	trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
			      s->pics[irq >> 3].imr, ret == 0);
	pic_unlock(s);

	return ret;
}

void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id)
{
	int i;

	pic_lock(s);
	for (i = 0; i < PIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &s->irq_states[i]);
	pic_unlock(s);
}

/*
 * Acknowledge interrupt 'irq'.
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
	s->isr |= 1 << irq;
	/*
	 * We don't clear a level-sensitive interrupt here.
	 */
	if (!(s->elcr & (1 << irq)))
		s->irr &= ~(1 << irq);

	if (s->auto_eoi) {
		if (s->rotate_on_auto_eoi)
			s->priority_add = (irq + 1) & 7;
		pic_clear_isr(s, irq);
	}
}
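/*
 * kvm_pic_read_irq() below emulates the INTA cycle: the highest
 * priority pending irq is acknowledged and translated into a vector
 * (irq_base + irq).  IRQ2 on the master cascades into the slave, and
 * when nothing is pending by acknowledge time a spurious IRQ7 vector
 * is delivered, mirroring real 8259A behaviour.
 */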
int kvm_pic_read_irq(struct kvm *kvm)
{
	int irq, irq2, intno;
	struct kvm_pic *s = kvm->arch.vpic;

	s->output = 0;

	pic_lock(s);
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0) {
		pic_intack(&s->pics[0], irq);
		if (irq == 2) {
			irq2 = pic_get_irq(&s->pics[1]);
			if (irq2 >= 0)
				pic_intack(&s->pics[1], irq2);
			else
				/*
				 * spurious IRQ on slave controller
				 */
				irq2 = 7;
			intno = s->pics[1].irq_base + irq2;
		} else
			intno = s->pics[0].irq_base + irq;
	} else {
		/*
		 * spurious IRQ on master controller
		 */
		irq = 7;
		intno = s->pics[0].irq_base + irq;
	}
	pic_update_irq(s);
	pic_unlock(s);

	return intno;
}

static void kvm_pic_reset(struct kvm_kpic_state *s)
{
	int irq;
	unsigned long i;
	struct kvm_vcpu *vcpu;
	u8 edge_irr = s->irr & ~s->elcr;
	bool found = false;

	s->last_irr = 0;
	s->irr &= s->elcr;
	s->imr = 0;
	s->priority_add = 0;
	s->special_mask = 0;
	s->read_reg_select = 0;
	if (!s->init4) {
		s->special_fully_nested_mode = 0;
		s->auto_eoi = 0;
	}
	s->init_state = 1;

	kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
		if (kvm_apic_accept_pic_intr(vcpu)) {
			found = true;
			break;
		}

	if (!found)
		return;

	for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
		if (edge_irr & (1 << irq))
			pic_clear_isr(s, irq);
}
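/*
 * Command decode in pic_ioport_write() below follows the standard
 * 8259A programming model: on the even port, bit 4 set means ICW1
 * (which restarts the init sequence; init_state then walks
 * ICW2..ICW4), bit 3 set selects OCW3 (poll / read-register /
 * special-mask), and otherwise the top three bits of the value encode
 * an OCW2 command (EOI and priority-rotation variants).  Writes to the
 * odd port are ICW2..ICW4 while initializing (ICW3, the cascade
 * wiring, is accepted but ignored), or OCW1 (the interrupt mask) in
 * normal mode.
 */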
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	int priority, cmd, irq;

	addr &= 1;
	if (addr == 0) {
		if (val & 0x10) {
			s->init4 = val & 1;
			if (val & 0x02)
				pr_pic_unimpl("single mode not supported");
			if (val & 0x08)
				pr_pic_unimpl(
					"level sensitive irq not supported");
			kvm_pic_reset(s);
		} else if (val & 0x08) {
			if (val & 0x04)
				s->poll = 1;
			if (val & 0x02)
				s->read_reg_select = val & 1;
			if (val & 0x40)
				s->special_mask = (val >> 5) & 1;
		} else {
			cmd = val >> 5;
			switch (cmd) {
			case 0:
			case 4:
				s->rotate_on_auto_eoi = cmd >> 2;
				break;
			case 1:	/* end of interrupt */
			case 5:
				priority = get_priority(s, s->isr);
				if (priority != 8) {
					irq = (priority + s->priority_add) & 7;
					if (cmd == 5)
						s->priority_add = (irq + 1) & 7;
					pic_clear_isr(s, irq);
					pic_update_irq(s->pics_state);
				}
				break;
			case 3:
				irq = val & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			case 6:
				s->priority_add = (val + 1) & 7;
				pic_update_irq(s->pics_state);
				break;
			case 7:
				irq = val & 7;
				s->priority_add = (irq + 1) & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			default:
				break;	/* no operation */
			}
		}
	} else
		switch (s->init_state) {
		case 0: { /* normal mode */
			u8 imr_diff = s->imr ^ val,
				off = (s == &s->pics_state->pics[0]) ? 0 : 8;
			s->imr = val;
			for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
				if (imr_diff & (1 << irq))
					kvm_fire_mask_notifiers(
						s->pics_state->kvm,
						SELECT_PIC(irq + off),
						irq + off,
						!!(s->imr & (1 << irq)));
			pic_update_irq(s->pics_state);
			break;
		}
		case 1:
			s->irq_base = val & 0xf8;
			s->init_state = 2;
			break;
		case 2:
			if (s->init4)
				s->init_state = 3;
			else
				s->init_state = 0;
			break;
		case 3:
			s->special_fully_nested_mode = (val >> 4) & 1;
			s->auto_eoi = (val >> 1) & 1;
			s->init_state = 0;
			break;
		}
}

static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
	int ret;

	ret = pic_get_irq(s);
	if (ret >= 0) {
		if (addr1 >> 7) {
			s->pics_state->pics[0].isr &= ~(1 << 2);
			s->pics_state->pics[0].irr &= ~(1 << 2);
		}
		s->irr &= ~(1 << ret);
		pic_clear_isr(s, ret);
		if (addr1 >> 7 || ret != 2)
			pic_update_irq(s->pics_state);
	} else {
		ret = 0x07;
		pic_update_irq(s->pics_state);
	}

	return ret;
}

static u32 pic_ioport_read(void *opaque, u32 addr)
{
	struct kvm_kpic_state *s = opaque;
	int ret;

	if (s->poll) {
		ret = pic_poll_read(s, addr);
		s->poll = 0;
	} else if ((addr & 1) == 0) {
		/* even port: IRR or ISR, selected by OCW3 */
		ret = s->read_reg_select ? s->isr : s->irr;
	} else {
		/* odd port: the interrupt mask register */
		ret = s->imr;
	}

	return ret;
}

static void elcr_ioport_write(void *opaque, u32 val)
{
	struct kvm_kpic_state *s = opaque;

	s->elcr = val & s->elcr_mask;
}

static u32 elcr_ioport_read(void *opaque)
{
	struct kvm_kpic_state *s = opaque;

	return s->elcr;
}

static int picdev_write(struct kvm_pic *s,
			gpa_t addr, int len, const void *val)
{
	unsigned char data = *(unsigned char *)val;

	if (len != 1) {
		pr_pic_unimpl("non byte write\n");
		return 0;
	}
	switch (addr) {
	case 0x20:
	case 0x21:
		pic_lock(s);
		pic_ioport_write(&s->pics[0], addr, data);
		pic_unlock(s);
		break;
	case 0xa0:
	case 0xa1:
		pic_lock(s);
		pic_ioport_write(&s->pics[1], addr, data);
		pic_unlock(s);
		break;
	case 0x4d0:
	case 0x4d1:
		pic_lock(s);
		elcr_ioport_write(&s->pics[addr & 1], data);
		pic_unlock(s);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int picdev_read(struct kvm_pic *s,
		       gpa_t addr, int len, void *val)
{
	unsigned char *data = (unsigned char *)val;

	if (len != 1) {
		memset(val, 0, len);
		pr_pic_unimpl("non byte read\n");
		return 0;
	}
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		pic_lock(s);
		*data = pic_ioport_read(&s->pics[addr >> 7], addr);
		pic_unlock(s);
		break;
	case 0x4d0:
	case 0x4d1:
		pic_lock(s);
		*data = elcr_ioport_read(&s->pics[addr & 1]);
		pic_unlock(s);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
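/*
 * The trampolines below adapt the kvm_io_device callbacks to the
 * shared picdev_write()/picdev_read() helpers.  Port layout: 0x20/0x21
 * reach the master 8259, 0xa0/0xa1 the slave, and 0x4d0/0x4d1 the
 * EISA ELCR (edge/level control) registers.  Only byte-wide accesses
 * are emulated; wider accesses are logged as unimplemented.
 */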
static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_master),
			    addr, len, val);
}

static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_master),
			   addr, len, val);
}

static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
			    addr, len, val);
}

static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
			   addr, len, val);
}

static int picdev_elcr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_elcr),
			    addr, len, val);
}

static int picdev_elcr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_elcr),
			   addr, len, val);
}

/*
 * Callback invoked when the master PIC's irq (INT output) status changes.
 */
static void pic_irq_request(struct kvm *kvm, int level)
{
	struct kvm_pic *s = kvm->arch.vpic;

	if (!s->output)
		s->wakeup_needed = true;
	s->output = level;
}

static const struct kvm_io_device_ops picdev_master_ops = {
	.read = picdev_master_read,
	.write = picdev_master_write,
};

static const struct kvm_io_device_ops picdev_slave_ops = {
	.read = picdev_slave_read,
	.write = picdev_slave_write,
};

static const struct kvm_io_device_ops picdev_elcr_ops = {
	.read = picdev_elcr_read,
	.write = picdev_elcr_write,
};

int kvm_pic_init(struct kvm *kvm)
{
	struct kvm_pic *s;
	int ret;

	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL_ACCOUNT);
	if (!s)
		return -ENOMEM;
	spin_lock_init(&s->lock);
	s->kvm = kvm;
	s->pics[0].elcr_mask = 0xf8;
	s->pics[1].elcr_mask = 0xde;
	s->pics[0].pics_state = s;
	s->pics[1].pics_state = s;

	/*
	 * Initialize the PIO devices.
	 */
	kvm_iodevice_init(&s->dev_master, &picdev_master_ops);
	kvm_iodevice_init(&s->dev_slave, &picdev_slave_ops);
	kvm_iodevice_init(&s->dev_elcr, &picdev_elcr_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2,
				      &s->dev_master);
	if (ret < 0)
		goto fail_unlock;

	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0xa0, 2, &s->dev_slave);
	if (ret < 0)
		goto fail_unreg_2;

	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_elcr);
	if (ret < 0)
		goto fail_unreg_1;

	mutex_unlock(&kvm->slots_lock);

	kvm->arch.vpic = s;

	return 0;

fail_unreg_1:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_slave);

fail_unreg_2:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_master);

fail_unlock:
	mutex_unlock(&kvm->slots_lock);

	kfree(s);

	return ret;
}

void kvm_pic_destroy(struct kvm *kvm)
{
	struct kvm_pic *vpic = kvm->arch.vpic;

	if (!vpic)
		return;

	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_elcr);
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.vpic = NULL;
	kfree(vpic);
}
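/*
 * Lifecycle note (an assumption about the surrounding KVM code, not
 * part of this file): kvm_pic_init() is reached when userspace creates
 * the in-kernel irqchip via the KVM_CREATE_IRQCHIP ioctl.  The error
 * path above unwinds already-registered devices in reverse order under
 * kvm->slots_lock before freeing the PIC, while kvm_pic_destroy()
 * unregisters all three PIO regions for a fully initialized PIC.
 */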