/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/xen/page.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays.  The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
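/*
 * Illustrative note on the layout: evtchn_to_irq is a two-level table.
 * Each row is one page, allocated on demand by set_evtchn_to_irq(), and
 * holds PAGE_SIZE / sizeof(int) entries.  Assuming 4 KiB pages and 4-byte
 * ints, a row covers 1024 ports, so for example port 3000 lives at
 * row 2, column 952.  Rows are never freed once allocated; unused slots
 * simply hold -1.
 */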
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static void clear_evtchn_to_irq_row(unsigned row)
{
	unsigned col;

	for (col = 0; col < EVTCHN_PER_ROW; col++)
		evtchn_to_irq[row][col] = -1;
}

static void clear_evtchn_to_irq_all(void)
{
	unsigned row;

	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
		if (evtchn_to_irq[row] == NULL)
			continue;
		clear_evtchn_to_irq_row(row);
	}
}

static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
{
	unsigned row;
	unsigned col;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	row = EVTCHN_ROW(evtchn);
	col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;

		evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
		if (evtchn_to_irq[row] == NULL)
			return -ENOMEM;

		clear_evtchn_to_irq_row(row);
	}

	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
	return 0;
}

int get_evtchn_to_irq(unsigned evtchn)
{
	if (evtchn >= xen_evtchn_max_channels())
		return -1;
	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
		return -1;
	return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
}

/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned evtchn,
				     unsigned short cpu)
{
	int ret;

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
		return ret;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

	return xen_evtchn_port_setup(info);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
				     unsigned evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  unsigned evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   unsigned evtchn,
				   unsigned virq)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}
static int xen_irq_info_pirq_setup(unsigned irq,
				   unsigned evtchn,
				   unsigned pirq,
				   unsigned gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	info->evtchn = 0;
}

/*
 * Accessors for packed IRQ information.
 */
unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = get_evtchn_to_irq(chn);
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
#endif
	xen_evtchn_port_bind_to_cpu(info, cpu);

	info->cpu = cpu;
}

static void xen_evtchn_mask_all(void)
{
	unsigned int evtchn;

	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
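/*
 * Typical use of notify_remote_via_irq(), sketched with placeholder names
 * (ring, notify, my_irq): a frontend that has pushed requests onto a
 * shared ring kicks the other end using the irq it got back from
 * bind_evtchn_to_irqhandler():
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(my_irq);
 */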
static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	/* By default all event channels notify CPU#0. */
	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);

	if (irq >= 0) {
		for (i = 0; i < nvec; i++)
			xen_irq_init(irq + i);
	}

	return irq;
}

static inline int __must_check xen_allocate_irq_dynamic(void)
{

	return xen_allocate_irqs_dynamic(1);
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	WARN_ON(info->refcnt > 0);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

static void xen_evtchn_close(unsigned int port)
{
	struct evtchn_close close;

	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}
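/*
 * Summary of the startup path below: __startup_pirq() asks Xen for an
 * event channel via EVTCHNOP_bind_pirq, records the returned port in the
 * evtchn<->irq mapping, binds it to CPU0 and unmasks it, finishing with
 * an eoi_pirq() to clear any event that is already pending.
 */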
static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		pr_warn("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	rc = set_evtchn_to_irq(evtchn, irq);
	if (rc != 0) {
		pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
		       irq, rc);
		xen_evtchn_close(evtchn);
		return 0;
	}
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	unsigned evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

static void __unbind_from_irq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = cpu_from_irq(irq);

		xen_evtchn_close(evtchn);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		xen_irq_info_cleanup(info);
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);
}
/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq is actually
 * started up.  Return an existing irq if we've already got one for
 * the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
				      shareable ? PIRQ_SHAREABLE : 0);
	if (ret < 0) {
		__unbind_from_irq(irq);
		irq = ret;
		goto out;
	}

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
					      handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
					      handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid)
{
	int i, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irqs_dynamic(nvec);
	if (irq < 0)
		goto out;

	for (i = 0; i < nvec; i++) {
		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);

		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
					      i == 0 ? 0 : PIRQ_MSI_GROUP);
		if (ret < 0)
			goto error_irq;
	}

	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	for (; i >= 0; i--)
		__unbind_from_irq(irq + i);
	mutex_unlock(&irq_mapping_update_lock);
	return ret;
}
#endif
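/*
 * Note on multi-vector MSI: xen_bind_pirq_msi_to_irq() above allocates
 * nvec consecutive Linux irqs for one MSI group, attaches the MSI
 * descriptor only to the first one and tags the remaining vectors with
 * PIRQ_MSI_GROUP; the teardown in xen_destroy_irq() below relies on that
 * flag to decide whether to issue PHYSDEVOP_unmap_pirq.
 */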
int xen_destroy_irq(int irq)
{
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	/*
	 * Only the first vector of an MSI group does the PIRQ unmap:
	 * the other vectors are flagged PIRQ_MSI_GROUP and skip it.
	 */
	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs). */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			pr_info("domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}


int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;
	int ret;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		ret = xen_irq_info_evtchn_setup(irq, evtchn);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/* New interdomain events are bound to VCPU 0. */
		bind_evtchn_to_cpu(evtchn, 0);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
				   unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);

static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port < xen_evtchn_max_channels(); port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported.
 */
unsigned xen_evtchn_nr_channels(void)
{
	return evtchn_ops->nr_channels();
}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						  &bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
}
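/*
 * Typical driver usage of the *_to_irqhandler helpers below, sketched
 * with placeholder names (my_handler, my_dev):
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-device", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 *
 * The bind step allocates (or reuses) the Linux irq for the port and
 * calls request_irq(); the unbind step undoes both.
 */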
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname,
			    void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq: irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
	struct evtchn_set_priority set_priority;

	set_priority.port = evtchn_from_irq(irq);
	set_priority.priority = priority;

	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
					   &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);

int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = irq_get_handler_data(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);
	if (irq == -1)
		goto done;

	info = irq_get_handler_data(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0)
		goto done;

	info->refcnt++;
	err = 0;
done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);

	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
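/*
 * xed_nesting_count keeps the upcall handler from recursing on itself:
 * a nested invocation on the same CPU only bumps the counter and returns,
 * while the outermost invocation notices the bump (count != 1) or a newly
 * set evtchn_upcall_pending and loops to scan for events again.
 */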
static void __xen_evtchn_do_upcall(void)
{
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	int cpu = get_cpu();
	unsigned count;

	do {
		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

		xen_evtchn_handle_events(cpu);

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
#ifdef CONFIG_X86
	exit_idle();
	inc_irq_stat(irq_hv_callback_count);
#endif

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	(void)xen_irq_info_evtchn_setup(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
	if (xen_hvm_domain() && !xen_have_vector_callback)
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	masked = test_and_set_mask(evtchn);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	if (!masked)
		unmask_evtchn(evtchn);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq(data->irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	masked = test_and_set_mask(evtchn);
	set_evtchn(evtchn);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		   only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
				gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}
static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
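/*
 * The clear/poll/test helpers above are intended to be used together,
 * e.g. by the Xen PV spinlock code: clear the pending state with
 * xen_clear_irq_pending(), block in xen_poll_irq() until the (masked)
 * event fires, then use xen_test_irq_pending() to check whether the
 * wakeup actually came from that port.
 */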
/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_mask_all();
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = 0;
			return;
		}
		pr_info("Xen HVM callback vector for event delivery is enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
			alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
					xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static bool fifo_events = true;
module_param(fifo_events, bool, 0);
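/*
 * xen_init_IRQ() below selects the event channel ABI: the FIFO-based ABI
 * is tried first unless booted with xen.fifo_events=0, and the code falls
 * back to the traditional 2-level ABI when FIFO support is unavailable.
 */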
void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;

	if (fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0)
		xen_evtchn_2l_init();

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	xen_evtchn_mask_all();

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_callback_vector();

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		/* TODO: No PVH support for PIRQ EOI */
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}