/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>
#include "trace.h"

static void pic_irq_request(struct kvm *kvm, int level);

static void pic_lock(struct kvm_pic *s)
	__acquires(&s->lock)
{
	raw_spin_lock(&s->lock);
}

static void pic_unlock(struct kvm_pic *s)
	__releases(&s->lock)
{
	bool wakeup = s->wakeup_needed;
	struct kvm_vcpu *vcpu, *found = NULL;
	int i;

	s->wakeup_needed = false;

	raw_spin_unlock(&s->lock);

	if (wakeup) {
		kvm_for_each_vcpu(i, vcpu, s->kvm) {
			if (kvm_apic_accept_pic_intr(vcpu)) {
				found = vcpu;
				break;
			}
		}

		if (!found)
			found = s->kvm->bsp_vcpu;

		if (!found)
			return;

		kvm_vcpu_kick(found);
	}
}

static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
	s->isr &= ~(1 << irq);
	s->isr_ack |= (1 << irq);
	if (s != &s->pics_state->pics[0])
		irq += 8;
	/*
	 * We are dropping the lock while calling ack notifiers since ack
	 * notifier callbacks for assigned devices call into PIC recursively.
	 * Other interrupts may be delivered to PIC while the lock is dropped
	 * but it should be safe since PIC state is already updated at this
	 * stage.
	 */
	pic_unlock(s->pics_state);
	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
	pic_lock(s->pics_state);
}

void kvm_pic_clear_isr_ack(struct kvm *kvm)
{
	struct kvm_pic *s = pic_irqchip(kvm);

	pic_lock(s);
	s->pics[0].isr_ack = 0xff;
	s->pics[1].isr_ack = 0xff;
	pic_unlock(s);
}
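/*
 * Return convention of pic_set_irq1() below: -1 if the pin is masked
 * by the IMR, 0 if the interrupt was already pending in the IRR
 * (kvm_pic_set_irq() treats ret == 0 as a coalesced interrupt in its
 * tracepoint), and 1 otherwise. For edge-triggered pins, last_irr
 * tracks the previous line state so that only a 0->1 transition
 * latches a new IRR bit; holding the line high does not re-trigger.
 */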
/*
 * set irq level. If an edge is detected, then the IRR is set to 1
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
	int mask, ret = 1;
	mask = 1 << irq;
	if (s->elcr & mask)	/* level triggered */
		if (level) {
			ret = !(s->irr & mask);
			s->irr |= mask;
			s->last_irr |= mask;
		} else {
			s->irr &= ~mask;
			s->last_irr &= ~mask;
		}
	else	/* edge triggered */
		if (level) {
			if ((s->last_irr & mask) == 0) {
				ret = !(s->irr & mask);
				s->irr |= mask;
			}
			s->last_irr |= mask;
		} else
			s->last_irr &= ~mask;

	return (s->imr & mask) ? -1 : ret;
}

/*
 * return the highest priority found in mask (highest = smallest
 * number). Return 8 if no irq
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
	int priority;
	if (mask == 0)
		return 8;
	priority = 0;
	while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
		priority++;
	return priority;
}

/*
 * return the irq the pic wants to deliver. return -1 if none
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
	int mask, cur_priority, priority;

	mask = s->irr & ~s->imr;
	priority = get_priority(s, mask);
	if (priority == 8)
		return -1;
	/*
	 * compute current priority. If special fully nested mode on the
	 * master, the IRQ coming from the slave is not taken into account
	 * for the priority computation.
	 */
	mask = s->isr;
	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
		mask &= ~(1 << 2);
	cur_priority = get_priority(s, mask);
	if (priority < cur_priority)
		/*
		 * higher priority found: an irq should be generated
		 */
		return (priority + s->priority_add) & 7;
	else
		return -1;
}
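/*
 * Worked example of the rotating-priority scan above: with
 * priority_add == 3, get_priority() probes pins in the order
 * 3,4,5,6,7,0,1,2 and returns the offset into that sequence, so a
 * pending IRQ3 yields priority 0, and pic_get_irq() maps it back to
 * pin 3 via (priority + priority_add) & 7.
 */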
/*
 * raise irq to CPU if necessary. must be called every time the active
 * irq may change
 */
static void pic_update_irq(struct kvm_pic *s)
{
	int irq2, irq;

	irq2 = pic_get_irq(&s->pics[1]);
	if (irq2 >= 0) {
		/*
		 * if an irq is requested by the slave pic, signal the
		 * master PIC
		 */
		pic_set_irq1(&s->pics[0], 2, 1);
		pic_set_irq1(&s->pics[0], 2, 0);
	}
	irq = pic_get_irq(&s->pics[0]);
	pic_irq_request(s->kvm, irq >= 0);
}

void kvm_pic_update_irq(struct kvm_pic *s)
{
	pic_lock(s);
	pic_update_irq(s);
	pic_unlock(s);
}

int kvm_pic_set_irq(void *opaque, int irq, int level)
{
	struct kvm_pic *s = opaque;
	int ret = -1;

	pic_lock(s);
	if (irq >= 0 && irq < PIC_NUM_PINS) {
		ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
		pic_update_irq(s);
		trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
				      s->pics[irq >> 3].imr, ret == 0);
	}
	pic_unlock(s);

	return ret;
}

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
	s->isr |= 1 << irq;
	/*
	 * We don't clear a level sensitive interrupt here
	 */
	if (!(s->elcr & (1 << irq)))
		s->irr &= ~(1 << irq);

	if (s->auto_eoi) {
		if (s->rotate_on_auto_eoi)
			s->priority_add = (irq + 1) & 7;
		pic_clear_isr(s, irq);
	}
}

int kvm_pic_read_irq(struct kvm *kvm)
{
	int irq, irq2, intno;
	struct kvm_pic *s = pic_irqchip(kvm);

	pic_lock(s);
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0) {
		pic_intack(&s->pics[0], irq);
		if (irq == 2) {
			irq2 = pic_get_irq(&s->pics[1]);
			if (irq2 >= 0)
				pic_intack(&s->pics[1], irq2);
			else
				/*
				 * spurious IRQ on slave controller
				 */
				irq2 = 7;
			intno = s->pics[1].irq_base + irq2;
			irq = irq2 + 8;
		} else
			intno = s->pics[0].irq_base + irq;
	} else {
		/*
		 * spurious IRQ on master controller
		 */
		irq = 7;
		intno = s->pics[0].irq_base + irq;
	}
	pic_update_irq(s);
	pic_unlock(s);

	return intno;
}

void kvm_pic_reset(struct kvm_kpic_state *s)
{
	int irq;
	struct kvm_vcpu *vcpu0 = s->pics_state->kvm->bsp_vcpu;
	u8 irr = s->irr, isr = s->imr;

	s->last_irr = 0;
	s->irr = 0;
	s->imr = 0;
	s->isr = 0;
	s->isr_ack = 0xff;
	s->priority_add = 0;
	s->irq_base = 0;
	s->read_reg_select = 0;
	s->poll = 0;
	s->special_mask = 0;
	s->init_state = 0;
	s->auto_eoi = 0;
	s->rotate_on_auto_eoi = 0;
	s->special_fully_nested_mode = 0;
	s->init4 = 0;

	for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
		if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
			if (irr & (1 << irq) || isr & (1 << irq))
				pic_clear_isr(s, irq);
	}
}
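/*
 * Overview of the command decoding in pic_ioport_write() below.
 * Writes to the base port (0x20/0xa0):
 *   bit 4 set -> ICW1, restarts the init sequence (init_state = 1)
 *   bit 3 set -> OCW3: poll mode, IRR/ISR read select, special mask
 *   otherwise -> OCW2, with bits 7-5 selecting EOI/rotate commands
 * Writes to the data port (0x21/0xa1) are OCW1 (IMR load) in normal
 * mode, or ICW2..ICW4 while init_state steps through 1..3.
 */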
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	int priority, cmd, irq;

	addr &= 1;
	if (addr == 0) {
		if (val & 0x10) {
			s->init4 = val & 1;
			s->last_irr = 0;
			s->imr = 0;
			s->priority_add = 0;
			s->special_mask = 0;
			s->read_reg_select = 0;
			if (!s->init4) {
				s->special_fully_nested_mode = 0;
				s->auto_eoi = 0;
			}
			s->init_state = 1;
			if (val & 0x02)
				printk(KERN_ERR "single mode not supported");
			if (val & 0x08)
				printk(KERN_ERR
				       "level sensitive irq not supported");
		} else if (val & 0x08) {
			if (val & 0x04)
				s->poll = 1;
			if (val & 0x02)
				s->read_reg_select = val & 1;
			if (val & 0x40)
				s->special_mask = (val >> 5) & 1;
		} else {
			cmd = val >> 5;
			switch (cmd) {
			case 0:
			case 4:
				s->rotate_on_auto_eoi = cmd >> 2;
				break;
			case 1:	/* end of interrupt */
			case 5:
				priority = get_priority(s, s->isr);
				if (priority != 8) {
					irq = (priority + s->priority_add) & 7;
					if (cmd == 5)
						s->priority_add = (irq + 1) & 7;
					pic_clear_isr(s, irq);
					pic_update_irq(s->pics_state);
				}
				break;
			case 3:
				irq = val & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			case 6:
				s->priority_add = (val + 1) & 7;
				pic_update_irq(s->pics_state);
				break;
			case 7:
				irq = val & 7;
				s->priority_add = (irq + 1) & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			default:
				break;	/* no operation */
			}
		}
	} else
		switch (s->init_state) {
		case 0: { /* normal mode */
			u8 imr_diff = s->imr ^ val,
				off = (s == &s->pics_state->pics[0]) ? 0 : 8;
			s->imr = val;
			for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
				if (imr_diff & (1 << irq))
					kvm_fire_mask_notifiers(
						s->pics_state->kvm,
						SELECT_PIC(irq + off),
						irq + off,
						!!(s->imr & (1 << irq)));
			pic_update_irq(s->pics_state);
			break;
		}
		case 1:
			s->irq_base = val & 0xf8;
			s->init_state = 2;
			break;
		case 2:
			if (s->init4)
				s->init_state = 3;
			else
				s->init_state = 0;
			break;
		case 3:
			s->special_fully_nested_mode = (val >> 4) & 1;
			s->auto_eoi = (val >> 1) & 1;
			s->init_state = 0;
			break;
		}
}

static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
	int ret;

	ret = pic_get_irq(s);
	if (ret >= 0) {
		if (addr1 >> 7) {
			s->pics_state->pics[0].isr &= ~(1 << 2);
			s->pics_state->pics[0].irr &= ~(1 << 2);
		}
		s->irr &= ~(1 << ret);
		pic_clear_isr(s, ret);
		if (addr1 >> 7 || ret != 2)
			pic_update_irq(s->pics_state);
	} else {
		ret = 0x07;
		pic_update_irq(s->pics_state);
	}

	return ret;
}

static u32 pic_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;
	unsigned int addr;
	int ret;

	addr = addr1;
	addr &= 1;
	if (s->poll) {
		ret = pic_poll_read(s, addr1);
		s->poll = 0;
	} else
		if (addr == 0)
			if (s->read_reg_select)
				ret = s->isr;
			else
				ret = s->irr;
		else
			ret = s->imr;
	return ret;
}

static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	s->elcr = val & s->elcr_mask;
}

static u32 elcr_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;
	return s->elcr;
}

static int picdev_in_range(gpa_t addr)
{
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
	case 0x4d0:
	case 0x4d1:
		return 1;
	default:
		return 0;
	}
}

static inline struct kvm_pic *to_pic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pic, dev);
}
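/*
 * Port-to-PIC mapping used by the handlers below: for the
 * command/data ports, addr >> 7 selects pics[0] for 0x20/0x21
 * (master) and pics[1] for 0xa0/0xa1 (slave); for the ELCR ports,
 * addr & 1 selects pics[0] for 0x4d0 and pics[1] for 0x4d1.
 */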
static int picdev_write(struct kvm_io_device *this,
			gpa_t addr, int len, const void *val)
{
	struct kvm_pic *s = to_pic(this);
	unsigned char data = *(unsigned char *)val;
	if (!picdev_in_range(addr))
		return -EOPNOTSUPP;

	if (len != 1) {
		if (printk_ratelimit())
			printk(KERN_ERR "PIC: non byte write\n");
		return 0;
	}
	pic_lock(s);
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		pic_ioport_write(&s->pics[addr >> 7], addr, data);
		break;
	case 0x4d0:
	case 0x4d1:
		elcr_ioport_write(&s->pics[addr & 1], addr, data);
		break;
	}
	pic_unlock(s);
	return 0;
}

static int picdev_read(struct kvm_io_device *this,
		       gpa_t addr, int len, void *val)
{
	struct kvm_pic *s = to_pic(this);
	unsigned char data = 0;
	if (!picdev_in_range(addr))
		return -EOPNOTSUPP;

	if (len != 1) {
		if (printk_ratelimit())
			printk(KERN_ERR "PIC: non byte read\n");
		return 0;
	}
	pic_lock(s);
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		data = pic_ioport_read(&s->pics[addr >> 7], addr);
		break;
	case 0x4d0:
	case 0x4d1:
		data = elcr_ioport_read(&s->pics[addr & 1], addr);
		break;
	}
	*(unsigned char *)val = data;
	pic_unlock(s);
	return 0;
}

/*
 * callback when PIC0 irq status changed
 */
static void pic_irq_request(struct kvm *kvm, int level)
{
	struct kvm_vcpu *vcpu = kvm->bsp_vcpu;
	struct kvm_pic *s = pic_irqchip(kvm);
	int irq = pic_get_irq(&s->pics[0]);

	s->output = level;
	if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
		s->pics[0].isr_ack &= ~(1 << irq);
		s->wakeup_needed = true;
	}
}

static const struct kvm_io_device_ops picdev_ops = {
	.read = picdev_read,
	.write = picdev_write,
};

struct kvm_pic *kvm_create_pic(struct kvm *kvm)
{
	struct kvm_pic *s;
	int ret;

	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
	if (!s)
		return NULL;
	raw_spin_lock_init(&s->lock);
	s->kvm = kvm;
	s->pics[0].elcr_mask = 0xf8;
	s->pics[1].elcr_mask = 0xde;
	s->pics[0].pics_state = s;
	s->pics[1].pics_state = s;

	/*
	 * Initialize PIO device
	 */
	kvm_iodevice_init(&s->dev, &picdev_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kfree(s);
		return NULL;
	}

	return s;
}

void kvm_destroy_pic(struct kvm *kvm)
{
	struct kvm_pic *vpic = kvm->arch.vpic;

	if (vpic) {
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev);
		kvm->arch.vpic = NULL;
		kfree(vpic);
	}
}
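/*
 * Call-flow summary: kvm_create_pic() registers the device on
 * KVM_PIO_BUS so that guest port I/O reaches picdev_read() and
 * picdev_write(); kvm_pic_set_irq() injects pin level changes, and
 * kvm_pic_read_irq() supplies the vector when the guest CPU
 * acknowledges an interrupt.
 */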