/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#include <linux/mm.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>

static void pic_lock(struct kvm_pic *s)
	__acquires(&s->lock)
{
	spin_lock(&s->lock);
}

static void pic_unlock(struct kvm_pic *s)
	__releases(&s->lock)
{
	struct kvm *kvm = s->kvm;
	unsigned acks = s->pending_acks;
	bool wakeup = s->wakeup_needed;
	struct kvm_vcpu *vcpu;

	s->pending_acks = 0;
	s->wakeup_needed = false;

	spin_unlock(&s->lock);

	while (acks) {
		kvm_notify_acked_irq(kvm, SELECT_PIC(__ffs(acks)),
				     __ffs(acks));
		acks &= acks - 1;
	}

	if (wakeup) {
		vcpu = s->kvm->vcpus[0];
		if (vcpu)
			kvm_vcpu_kick(vcpu);
	}
}

static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
	s->isr &= ~(1 << irq);
	s->isr_ack |= (1 << irq);
}

void kvm_pic_clear_isr_ack(struct kvm *kvm)
{
	struct kvm_pic *s = pic_irqchip(kvm);
	s->pics[0].isr_ack = 0xff;
	s->pics[1].isr_ack = 0xff;
}

/*
 * set irq level. If an edge is detected, then the IRR is set to 1
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
	int mask, ret = 1;
	mask = 1 << irq;
	if (s->elcr & mask)	/* level triggered */
		if (level) {
			ret = !(s->irr & mask);
			s->irr |= mask;
			s->last_irr |= mask;
		} else {
			s->irr &= ~mask;
			s->last_irr &= ~mask;
		}
	else	/* edge triggered */
		if (level) {
			if ((s->last_irr & mask) == 0) {
				ret = !(s->irr & mask);
				s->irr |= mask;
			}
			s->last_irr |= mask;
		} else
			s->last_irr &= ~mask;

	return (s->imr & mask) ? -1 : ret;
}

/*
 * return the highest priority found in mask (highest = smallest
 * number). Return 8 if no irq
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
	int priority;
	if (mask == 0)
		return 8;
	priority = 0;
	while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
		priority++;
	return priority;
}

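/*
 * Note on priority handling (derivable from the code below): get_priority()
 * scans the IRQ lines starting at priority_add, so priority 0 corresponds
 * to line (priority_add & 7).  The "rotate" EOI commands in
 * pic_ioport_write() set priority_add = (irq + 1) & 7, which makes the
 * line that was just serviced the lowest-priority one.
 */
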
/*
 * return the pic wanted interrupt. return -1 if none
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
	int mask, cur_priority, priority;

	mask = s->irr & ~s->imr;
	priority = get_priority(s, mask);
	if (priority == 8)
		return -1;
	/*
	 * compute current priority. If special fully nested mode on the
	 * master, the IRQ coming from the slave is not taken into account
	 * for the priority computation.
	 */
	mask = s->isr;
	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
		mask &= ~(1 << 2);
	cur_priority = get_priority(s, mask);
	if (priority < cur_priority)
		/*
		 * higher priority found: an irq should be generated
		 */
		return (priority + s->priority_add) & 7;
	else
		return -1;
}

/*
 * raise irq to CPU if necessary. must be called every time the active
 * irq may change
 */
static void pic_update_irq(struct kvm_pic *s)
{
	int irq2, irq;

	irq2 = pic_get_irq(&s->pics[1]);
	if (irq2 >= 0) {
		/*
		 * if irq request by slave pic, signal master PIC
		 */
		pic_set_irq1(&s->pics[0], 2, 1);
		pic_set_irq1(&s->pics[0], 2, 0);
	}
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0)
		s->irq_request(s->irq_request_opaque, 1);
	else
		s->irq_request(s->irq_request_opaque, 0);
}

void kvm_pic_update_irq(struct kvm_pic *s)
{
	pic_lock(s);
	pic_update_irq(s);
	pic_unlock(s);
}

int kvm_pic_set_irq(void *opaque, int irq, int level)
{
	struct kvm_pic *s = opaque;
	int ret = -1;

	pic_lock(s);
	if (irq >= 0 && irq < PIC_NUM_PINS) {
		ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
		pic_update_irq(s);
	}
	pic_unlock(s);

	return ret;
}

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
	s->isr |= 1 << irq;
	if (s->auto_eoi) {
		if (s->rotate_on_auto_eoi)
			s->priority_add = (irq + 1) & 7;
		pic_clear_isr(s, irq);
	}
	/*
	 * We don't clear a level sensitive interrupt here
	 */
	if (!(s->elcr & (1 << irq)))
		s->irr &= ~(1 << irq);
}

int kvm_pic_read_irq(struct kvm *kvm)
{
	int irq, irq2, intno;
	struct kvm_pic *s = pic_irqchip(kvm);

	pic_lock(s);
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0) {
		pic_intack(&s->pics[0], irq);
		if (irq == 2) {
			irq2 = pic_get_irq(&s->pics[1]);
			if (irq2 >= 0)
				pic_intack(&s->pics[1], irq2);
			else
				/*
				 * spurious IRQ on slave controller
				 */
				irq2 = 7;
			intno = s->pics[1].irq_base + irq2;
			irq = irq2 + 8;
		} else
			intno = s->pics[0].irq_base + irq;
	} else {
		/*
		 * spurious IRQ on host controller
		 */
		irq = 7;
		intno = s->pics[0].irq_base + irq;
	}
	pic_update_irq(s);
	pic_unlock(s);
	kvm_notify_acked_irq(kvm, SELECT_PIC(irq), irq);

	return intno;
}

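/*
 * Reset one of the two 8259 cores to its power-on state.  Before the
 * registers are cleared, any line still latched in IRR or ISR is recorded
 * in pending_acks (provided vcpu 0 is currently accepting PIC interrupts),
 * so that kvm_notify_acked_irq() is still called for it once the lock is
 * dropped in pic_unlock().
 */
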
void kvm_pic_reset(struct kvm_kpic_state *s)
{
	int irq, irqbase, n;
	struct kvm *kvm = s->pics_state->irq_request_opaque;
	struct kvm_vcpu *vcpu0 = kvm->vcpus[0];

	if (s == &s->pics_state->pics[0])
		irqbase = 0;
	else
		irqbase = 8;

	for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
		if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
			if (s->irr & (1 << irq) || s->isr & (1 << irq)) {
				n = irq + irqbase;
				s->pics_state->pending_acks |= 1 << n;
			}
	}
	s->last_irr = 0;
	s->irr = 0;
	s->imr = 0;
	s->isr = 0;
	s->isr_ack = 0xff;
	s->priority_add = 0;
	s->irq_base = 0;
	s->read_reg_select = 0;
	s->poll = 0;
	s->special_mask = 0;
	s->init_state = 0;
	s->auto_eoi = 0;
	s->rotate_on_auto_eoi = 0;
	s->special_fully_nested_mode = 0;
	s->init4 = 0;
}

static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	int priority, cmd, irq;

	addr &= 1;
	if (addr == 0) {
		if (val & 0x10) {
			kvm_pic_reset(s);	/* init */
			/*
			 * deassert a pending interrupt
			 */
			s->pics_state->irq_request(s->pics_state->
						   irq_request_opaque, 0);
			s->init_state = 1;
			s->init4 = val & 1;
			if (val & 0x02)
				printk(KERN_ERR "single mode not supported\n");
			if (val & 0x08)
				printk(KERN_ERR
				       "level sensitive irq not supported\n");
		} else if (val & 0x08) {
			if (val & 0x04)
				s->poll = 1;
			if (val & 0x02)
				s->read_reg_select = val & 1;
			if (val & 0x40)
				s->special_mask = (val >> 5) & 1;
		} else {
			cmd = val >> 5;
			switch (cmd) {
			case 0:
			case 4:
				s->rotate_on_auto_eoi = cmd >> 2;
				break;
			case 1:	/* end of interrupt */
			case 5:
				priority = get_priority(s, s->isr);
				if (priority != 8) {
					irq = (priority + s->priority_add) & 7;
					pic_clear_isr(s, irq);
					if (cmd == 5)
						s->priority_add = (irq + 1) & 7;
					pic_update_irq(s->pics_state);
				}
				break;
			case 3:
				irq = val & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			case 6:
				s->priority_add = (val + 1) & 7;
				pic_update_irq(s->pics_state);
				break;
			case 7:
				irq = val & 7;
				s->priority_add = (irq + 1) & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			default:
				break;	/* no operation */
			}
		}
	} else
		switch (s->init_state) {
		case 0:	/* normal mode */
			s->imr = val;
			pic_update_irq(s->pics_state);
			break;
		case 1:
			s->irq_base = val & 0xf8;
			s->init_state = 2;
			break;
		case 2:
			if (s->init4)
				s->init_state = 3;
			else
				s->init_state = 0;
			break;
		case 3:
			s->special_fully_nested_mode = (val >> 4) & 1;
			s->auto_eoi = (val >> 1) & 1;
			s->init_state = 0;
			break;
		}
}

static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
	int ret;

	ret = pic_get_irq(s);
	if (ret >= 0) {
		if (addr1 >> 7) {
			s->pics_state->pics[0].isr &= ~(1 << 2);
			s->pics_state->pics[0].irr &= ~(1 << 2);
		}
		s->irr &= ~(1 << ret);
		pic_clear_isr(s, ret);
		if (addr1 >> 7 || ret != 2)
			pic_update_irq(s->pics_state);
	} else {
		ret = 0x07;
		pic_update_irq(s->pics_state);
	}

	return ret;
}

static u32 pic_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;
	unsigned int addr;
	int ret;

	addr = addr1;
	addr &= 1;
	if (s->poll) {
		ret = pic_poll_read(s, addr1);
		s->poll = 0;
	} else
		if (addr == 0)
			if (s->read_reg_select)
				ret = s->isr;
			else
				ret = s->irr;
		else
			ret = s->imr;
	return ret;
}

static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	s->elcr = val & s->elcr_mask;
}

static u32 elcr_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;
	return s->elcr;
}

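/*
 * Guest I/O port layout handled by this device: 0x20/0x21 are the master
 * 8259's command/data ports, 0xa0/0xa1 the slave's, and 0x4d0/0x4d1 the
 * edge/level control registers (ELCR).  Bit 7 of the port address
 * (addr >> 7) selects master vs. slave below; for the ELCR ports the low
 * bit (addr & 1) does.
 */
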
static int picdev_in_range(struct kvm_io_device *this, gpa_t addr,
			   int len, int is_write)
{
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
	case 0x4d0:
	case 0x4d1:
		return 1;
	default:
		return 0;
	}
}

static void picdev_write(struct kvm_io_device *this,
			 gpa_t addr, int len, const void *val)
{
	struct kvm_pic *s = this->private;
	unsigned char data = *(unsigned char *)val;

	if (len != 1) {
		if (printk_ratelimit())
			printk(KERN_ERR "PIC: non byte write\n");
		return;
	}
	pic_lock(s);
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		pic_ioport_write(&s->pics[addr >> 7], addr, data);
		break;
	case 0x4d0:
	case 0x4d1:
		elcr_ioport_write(&s->pics[addr & 1], addr, data);
		break;
	}
	pic_unlock(s);
}

static void picdev_read(struct kvm_io_device *this,
			gpa_t addr, int len, void *val)
{
	struct kvm_pic *s = this->private;
	unsigned char data = 0;

	if (len != 1) {
		if (printk_ratelimit())
			printk(KERN_ERR "PIC: non byte read\n");
		return;
	}
	pic_lock(s);
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		data = pic_ioport_read(&s->pics[addr >> 7], addr);
		break;
	case 0x4d0:
	case 0x4d1:
		data = elcr_ioport_read(&s->pics[addr & 1], addr);
		break;
	}
	*(unsigned char *)val = data;
	pic_unlock(s);
}

/*
 * callback when PIC0 irq status changed
 */
static void pic_irq_request(void *opaque, int level)
{
	struct kvm *kvm = opaque;
	struct kvm_vcpu *vcpu = kvm->vcpus[0];
	struct kvm_pic *s = pic_irqchip(kvm);
	int irq = pic_get_irq(&s->pics[0]);

	s->output = level;
	if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
		s->pics[0].isr_ack &= ~(1 << irq);
		s->wakeup_needed = true;
	}
}

struct kvm_pic *kvm_create_pic(struct kvm *kvm)
{
	struct kvm_pic *s;
	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
	if (!s)
		return NULL;
	spin_lock_init(&s->lock);
	s->kvm = kvm;
	s->pics[0].elcr_mask = 0xf8;
	s->pics[1].elcr_mask = 0xde;
	s->irq_request = pic_irq_request;
	s->irq_request_opaque = kvm;
	s->pics[0].pics_state = s;
	s->pics[1].pics_state = s;

	/*
	 * Initialize PIO device
	 */
	s->dev.read = picdev_read;
	s->dev.write = picdev_write;
	s->dev.in_range = picdev_in_range;
	s->dev.private = s;
	kvm_io_bus_register_dev(&kvm->pio_bus, &s->dev);
	return s;
}