/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>

#include "irq.h"
#include "trace.h"

static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
	s->isr &= ~(1 << irq);
	s->isr_ack |= (1 << irq);
	if (s != &s->pics_state->pics[0])
		irq += 8;
	/*
	 * We drop the lock while calling the ack notifiers since ack
	 * notifier callbacks for assigned devices call into the PIC
	 * recursively. Another interrupt may be delivered to the PIC while
	 * the lock is dropped, but that is safe because the PIC state has
	 * already been updated at this stage.
	 */
	spin_unlock(&s->pics_state->lock);
	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
	spin_lock(&s->pics_state->lock);
}

void kvm_pic_clear_isr_ack(struct kvm *kvm)
{
	struct kvm_pic *s = pic_irqchip(kvm);

	spin_lock(&s->lock);
	s->pics[0].isr_ack = 0xff;
	s->pics[1].isr_ack = 0xff;
	spin_unlock(&s->lock);
}

/*
 * set irq level. If an edge is detected, then the IRR is set to 1
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
	int mask, ret = 1;

	mask = 1 << irq;
	if (s->elcr & mask)	/* level triggered */
		if (level) {
			ret = !(s->irr & mask);
			s->irr |= mask;
			s->last_irr |= mask;
		} else {
			s->irr &= ~mask;
			s->last_irr &= ~mask;
		}
	else	/* edge triggered */
		if (level) {
			if ((s->last_irr & mask) == 0) {
				ret = !(s->irr & mask);
				s->irr |= mask;
			}
			s->last_irr |= mask;
		} else
			s->last_irr &= ~mask;

	return (s->imr & mask) ? -1 : ret;
}

/*
 * return the highest priority found in mask (highest = smallest
 * number). Return 8 if no irq
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
	int priority;

	if (mask == 0)
		return 8;
	priority = 0;
	while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
		priority++;
	return priority;
}

/*
 * return the pic wanted interrupt. Return -1 if none.
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
	int mask, cur_priority, priority;

	mask = s->irr & ~s->imr;
	priority = get_priority(s, mask);
	if (priority == 8)
		return -1;
	/*
	 * compute current priority. If special fully nested mode on the
	 * master, the IRQ coming from the slave is not taken into account
	 * for the priority computation.
	 */
	mask = s->isr;
	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
		mask &= ~(1 << 2);
	cur_priority = get_priority(s, mask);
	if (priority < cur_priority)
		/*
		 * higher priority found: an irq should be generated
		 */
		return (priority + s->priority_add) & 7;
	else
		return -1;
}

/*
 * raise irq to CPU if necessary. must be called every time the active
 * irq may change
 */
static void pic_update_irq(struct kvm_pic *s)
{
	int irq2, irq;

	irq2 = pic_get_irq(&s->pics[1]);
	if (irq2 >= 0) {
		/*
		 * if irq request by slave pic, signal master PIC
		 */
		pic_set_irq1(&s->pics[0], 2, 1);
		pic_set_irq1(&s->pics[0], 2, 0);
	}
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0)
		s->irq_request(s->irq_request_opaque, 1);
	else
		s->irq_request(s->irq_request_opaque, 0);
}

void kvm_pic_update_irq(struct kvm_pic *s)
{
	spin_lock(&s->lock);
	pic_update_irq(s);
	spin_unlock(&s->lock);
}

int kvm_pic_set_irq(void *opaque, int irq, int level)
{
	struct kvm_pic *s = opaque;
	int ret = -1;

	spin_lock(&s->lock);
	if (irq >= 0 && irq < PIC_NUM_PINS) {
		ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
		pic_update_irq(s);
		trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
				      s->pics[irq >> 3].imr, ret == 0);
	}
	spin_unlock(&s->lock);

	return ret;
}

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
	s->isr |= 1 << irq;
	/*
	 * We don't clear a level sensitive interrupt here
	 */
	if (!(s->elcr & (1 << irq)))
		s->irr &= ~(1 << irq);

	if (s->auto_eoi) {
		if (s->rotate_on_auto_eoi)
			s->priority_add = (irq + 1) & 7;
		pic_clear_isr(s, irq);
	}
}

int kvm_pic_read_irq(struct kvm *kvm)
{
	int irq, irq2, intno;
	struct kvm_pic *s = pic_irqchip(kvm);

	spin_lock(&s->lock);
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0) {
		pic_intack(&s->pics[0], irq);
		if (irq == 2) {
			irq2 = pic_get_irq(&s->pics[1]);
			if (irq2 >= 0)
				pic_intack(&s->pics[1], irq2);
			else
				/*
				 * spurious IRQ on slave controller
				 */
				irq2 = 7;
			intno = s->pics[1].irq_base + irq2;
			irq = irq2 + 8;
		} else
			intno = s->pics[0].irq_base + irq;
	} else {
		/*
		 * spurious IRQ on host controller
		 */
		irq = 7;
		intno = s->pics[0].irq_base + irq;
	}
	pic_update_irq(s);
	spin_unlock(&s->lock);

	return intno;
}

void kvm_pic_reset(struct kvm_kpic_state *s)
{
	int irq;
	struct kvm *kvm = s->pics_state->irq_request_opaque;
	struct kvm_vcpu *vcpu0 = kvm->bsp_vcpu;
	u8 irr = s->irr, isr = s->imr;

	s->last_irr = 0;
	s->irr = 0;
	s->imr = 0;
	s->isr = 0;
	s->isr_ack = 0xff;
	s->priority_add = 0;
	s->irq_base = 0;
	s->read_reg_select = 0;
	s->poll = 0;
	s->special_mask = 0;
	s->init_state = 0;
	s->auto_eoi = 0;
	s->rotate_on_auto_eoi = 0;
	s->special_fully_nested_mode = 0;
	s->init4 = 0;

	for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
		if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
			if (irr & (1 << irq) || isr & (1 << irq))
				pic_clear_isr(s, irq);
	}
}

static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	int priority, cmd, irq;

	addr &= 1;
	if (addr == 0) {
		if (val & 0x10) {
			/* ICW1 */
			kvm_pic_reset(s);	/* init */
			/*
			 * deassert a pending interrupt
			 */
			s->pics_state->irq_request(s->pics_state->
						   irq_request_opaque, 0);
			s->init_state = 1;
			s->init4 = val & 1;
			if (val & 0x02)
				printk(KERN_ERR "single mode not supported");
			if (val & 0x08)
				printk(KERN_ERR
				       "level sensitive irq not supported");
		} else if (val & 0x08) {
			/* OCW3 */
			if (val & 0x04)
				s->poll = 1;
			if (val & 0x02)
				s->read_reg_select = val & 1;
			if (val & 0x40)
				s->special_mask = (val >> 5) & 1;
		} else {
			/* OCW2 */
			cmd = val >> 5;
			switch (cmd) {
			case 0:
			case 4:
				s->rotate_on_auto_eoi = cmd >> 2;
				break;
			case 1:	/* end of interrupt */
			case 5:
				priority = get_priority(s, s->isr);
				if (priority != 8) {
					irq = (priority + s->priority_add) & 7;
					if (cmd == 5)
						s->priority_add = (irq + 1) & 7;
					pic_clear_isr(s, irq);
					pic_update_irq(s->pics_state);
				}
				break;
			case 3:
				irq = val & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			case 6:
				s->priority_add = (val + 1) & 7;
				pic_update_irq(s->pics_state);
				break;
			case 7:
				irq = val & 7;
				s->priority_add = (irq + 1) & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			default:
				break;	/* no operation */
			}
		}
	} else
		switch (s->init_state) {
		case 0:		/* normal mode */
			s->imr = val;
			pic_update_irq(s->pics_state);
			break;
		case 1:		/* ICW2: vector base */
			s->irq_base = val & 0xf8;
			s->init_state = 2;
			break;
		case 2:		/* ICW3: cascade wiring, ignored */
			if (s->init4)
				s->init_state = 3;
			else
				s->init_state = 0;
			break;
		case 3:		/* ICW4 */
			s->special_fully_nested_mode = (val >> 4) & 1;
			s->auto_eoi = (val >> 1) & 1;
			s->init_state = 0;
			break;
		}
}

static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
	int ret;

	ret = pic_get_irq(s);
	if (ret >= 0) {
		if (addr1 >> 7) {
			s->pics_state->pics[0].isr &= ~(1 << 2);
			s->pics_state->pics[0].irr &= ~(1 << 2);
		}
		s->irr &= ~(1 << ret);
		pic_clear_isr(s, ret);
		if (addr1 >> 7 || ret != 2)
			pic_update_irq(s->pics_state);
	} else {
		ret = 0x07;
		pic_update_irq(s->pics_state);
	}

	return ret;
}

static u32 pic_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;
	unsigned int addr;
	int ret;

	addr = addr1;
	addr &= 1;
	if (s->poll) {
		ret = pic_poll_read(s, addr1);
		s->poll = 0;
	} else
		if (addr == 0)
			if (s->read_reg_select)
				ret = s->isr;
			else
				ret = s->irr;
		else
			ret = s->imr;
	return ret;
}

static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;

	s->elcr = val & s->elcr_mask;
}

static u32 elcr_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;

	return s->elcr;
}

static int picdev_in_range(gpa_t addr)
{
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
	case 0x4d0:
	case 0x4d1:
		return 1;
	default:
		return 0;
	}
}

static inline struct kvm_pic *to_pic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pic, dev);
}

static int picdev_write(struct kvm_io_device *this,
			gpa_t addr, int len, const void *val)
{
	struct kvm_pic *s = to_pic(this);
	unsigned char data = *(unsigned char *)val;

	if (!picdev_in_range(addr))
		return -EOPNOTSUPP;

	if (len != 1) {
		if (printk_ratelimit())
			printk(KERN_ERR "PIC: non byte write\n");
		return 0;
	}
	spin_lock(&s->lock);
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		/* addr >> 7 selects the master (0x2x) or slave (0xax) PIC */
		pic_ioport_write(&s->pics[addr >> 7], addr, data);
		break;
	case 0x4d0:
	case 0x4d1:
		elcr_ioport_write(&s->pics[addr & 1], addr, data);
		break;
	}
	spin_unlock(&s->lock);
	return 0;
}

static int picdev_read(struct kvm_io_device *this,
		       gpa_t addr, int len, void *val)
{
	struct kvm_pic *s = to_pic(this);
	unsigned char data = 0;

	if (!picdev_in_range(addr))
		return -EOPNOTSUPP;

	if (len != 1) {
		if (printk_ratelimit())
			printk(KERN_ERR "PIC: non byte read\n");
		return 0;
	}
	spin_lock(&s->lock);
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		data = pic_ioport_read(&s->pics[addr >> 7], addr);
		break;
	case 0x4d0:
	case 0x4d1:
		data = elcr_ioport_read(&s->pics[addr & 1], addr);
		break;
	}
	*(unsigned char *)val = data;
	spin_unlock(&s->lock);
	return 0;
}

/*
 * callback when PIC0 irq status changed
 */
static void pic_irq_request(void *opaque, int level)
{
	struct kvm *kvm = opaque;
	struct kvm_vcpu *vcpu = kvm->bsp_vcpu;
	struct kvm_pic *s = pic_irqchip(kvm);
	int irq = pic_get_irq(&s->pics[0]);

	s->output = level;
	if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
		s->pics[0].isr_ack &= ~(1 << irq);
		kvm_vcpu_kick(vcpu);
	}
}

static const struct kvm_io_device_ops picdev_ops = {
	.read     = picdev_read,
	.write    = picdev_write,
};

struct kvm_pic *kvm_create_pic(struct kvm *kvm)
{
	struct kvm_pic *s;
	int ret;

	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
	if (!s)
		return NULL;
	spin_lock_init(&s->lock);
	s->kvm = kvm;
	s->pics[0].elcr_mask = 0xf8;
	s->pics[1].elcr_mask = 0xde;
	s->irq_request = pic_irq_request;
	s->irq_request_opaque = kvm;
	s->pics[0].pics_state = s;
	s->pics[1].pics_state = s;

	/*
	 * Initialize PIO device
	 */
	kvm_iodevice_init(&s->dev, &picdev_ops);
	ret = kvm_io_bus_register_dev(kvm, &kvm->pio_bus, &s->dev);
	if (ret < 0) {
		kfree(s);
		return NULL;
	}

	return s;
}