/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))

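/*
 * Worked example (a sketch; assumes 4 KiB pages and 4-byte ints, so
 * EVTCHN_PER_ROW == 1024): event channel 3000 maps to
 * evtchn_to_irq[2][952]. Rows are page-sized and allocated lazily by
 * set_evtchn_to_irq() below, the first time a port in that row is bound.
 */
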
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static void clear_evtchn_to_irq_row(unsigned row)
{
	unsigned col;

	for (col = 0; col < EVTCHN_PER_ROW; col++)
		evtchn_to_irq[row][col] = -1;
}

static void clear_evtchn_to_irq_all(void)
{
	unsigned row;

	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
		if (evtchn_to_irq[row] == NULL)
			continue;
		clear_evtchn_to_irq_row(row);
	}
}

static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
{
	unsigned row;
	unsigned col;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	row = EVTCHN_ROW(evtchn);
	col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;

		evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
		if (evtchn_to_irq[row] == NULL)
			return -ENOMEM;

		clear_evtchn_to_irq_row(row);
	}

	evtchn_to_irq[row][col] = irq;
	return 0;
}

int get_evtchn_to_irq(unsigned evtchn)
{
	if (evtchn >= xen_evtchn_max_channels())
		return -1;
	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
		return -1;
	return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
}

/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned evtchn,
				     unsigned short cpu)
{
	int ret;

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
		return ret;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

	return xen_evtchn_port_setup(info);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
				     unsigned evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  unsigned evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   unsigned evtchn,
				   unsigned virq)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}

static int xen_irq_info_pirq_setup(unsigned irq,
				   unsigned evtchn,
				   unsigned pirq,
				   unsigned gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	info->evtchn = 0;
}

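/*
 * Flow sketch (hypothetical numbers): binding VIRQ_TIMER for CPU 1 to
 * irq 17 and event channel 42 stores info->u.virq = VIRQ_TIMER and sets
 * per_cpu(virq_to_irq, 1)[VIRQ_TIMER] = 17; afterwards
 * irq_from_virq(1, VIRQ_TIMER) returns 17 and virq_from_irq(17) returns
 * VIRQ_TIMER via the accessors below.
 */
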
/*
 * Accessors for packed IRQ information.
 */
unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = get_evtchn_to_irq(chn);
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
#endif
	xen_evtchn_port_bind_to_cpu(info, cpu);

	info->cpu = cpu;
}

static void xen_evtchn_mask_all(void)
{
	unsigned int evtchn;

	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

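/*
 * Usage sketch (hypothetical frontend, not part of this file): after
 * queueing requests on a shared ring, the backend is kicked through the
 * irq previously bound to the interdomain event channel:
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(dev->irq);
 */
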
static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	/* By default all event channels notify CPU#0. */
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);

	if (irq >= 0) {
		for (i = 0; i < nvec; i++)
			xen_irq_init(irq + i);
	}

	return irq;
}

static inline int __must_check xen_allocate_irq_dynamic(void)
{
	return xen_allocate_irqs_dynamic(1);
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < nr_legacy_irqs())
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	WARN_ON(info->refcnt > 0);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < nr_legacy_irqs())
		return;

	irq_free_desc(irq);
}

static void xen_evtchn_close(unsigned int port)
{
	struct evtchn_close close;

	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	if (!VALID_EVTCHN(evtchn))
		return;

	if (unlikely(irqd_is_setaffinity_pending(data))) {
		int masked = test_and_set_mask(evtchn);

		clear_evtchn(evtchn);

		irq_move_masked_irq(data);

		if (!masked)
			unmask_evtchn(evtchn);
	} else
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

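/*
 * A note on the mask/clear/move/unmask sequence in eoi_pirq() (the same
 * pattern appears in ack_dynirq() below); minimal sketch:
 *
 *	masked = test_and_set_mask(evtchn);	// remember prior mask state
 *	clear_evtchn(evtchn);			// ack the pending bit
 *	irq_move_masked_irq(data);		// migrate while it cannot fire
 *	if (!masked)
 *		unmask_evtchn(evtchn);		// restore previous state
 *
 * Masking first ensures a pending affinity change cannot race with a new
 * event being delivered on the old VCPU.
 */
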
static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		pr_warn("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	rc = set_evtchn_to_irq(evtchn, irq);
	if (rc)
		goto err;

	info->evtchn = evtchn;
	bind_evtchn_to_cpu(evtchn, 0);

	rc = xen_evtchn_port_setup(info);
	if (rc)
		goto err;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;

err:
	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
	xen_evtchn_close(evtchn);
	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	unsigned evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

static void __unbind_from_irq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = cpu_from_irq(irq);

		xen_evtchn_close(evtchn);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		xen_irq_info_cleanup(info);
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up. Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
				      shareable ? PIRQ_SHAREABLE : 0);
	if (ret < 0) {
		__unbind_from_irq(irq);
		irq = ret;
		goto out;
	}

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

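/*
 * Usage sketch (dom0 only; driver names hypothetical): a GSI discovered
 * via ACPI is bound to an irq first, then requested as usual. Calling
 * this twice for the same GSI simply returns the existing irq:
 *
 *	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, 1, "mydev");
 *	if (irq >= 0)
 *		ret = request_irq(irq, mydev_interrupt, IRQF_SHARED,
 *				  "mydev", mydev);
 */
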
#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid)
{
	int i, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irqs_dynamic(nvec);
	if (irq < 0)
		goto out;

	for (i = 0; i < nvec; i++) {
		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);

		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
					      i == 0 ? 0 : PIRQ_MSI_GROUP);
		if (ret < 0)
			goto error_irq;
	}

	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	for (; i >= 0; i--)
		__unbind_from_irq(irq + i);
	mutex_unlock(&irq_mapping_update_lock);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	/*
	 * Only the first vector in an MSI group owns the PIRQ mapping,
	 * so skip the PIRQ unmap for every other vector in the group.
	 */
	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			pr_info("domain %d does not have pirq %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;
	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;
	int ret;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		ret = xen_irq_info_evtchn_setup(irq, evtchn);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/* New interdomain events are bound to VCPU 0. */
		bind_evtchn_to_cpu(evtchn, 0);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
				   unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);

static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port < xen_evtchn_max_channels(); port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported.
 */
unsigned xen_evtchn_nr_channels(void)
{
	return evtchn_ops->nr_channels();
}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		if (percpu)
			irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
						      handle_percpu_irq, "virq");
		else
			irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
						      handle_edge_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						  &bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

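/*
 * Usage sketch (hypothetical frontend): the *_to_irqhandler helpers
 * combine binding with request_irq(), so teardown is a single call to
 * unbind_from_irqhandler() (defined further down):
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */
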
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

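/*
 * Usage sketch: per-cpu VIRQs are bound with IRQF_PERCPU so that
 * bind_virq_to_irq() picks the percpu chip (handler name hypothetical):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, my_virq_handler,
 *				      IRQF_PERCPU | IRQF_NOBALANCING,
 *				      "debug", NULL);
 */
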
/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq: irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
	struct evtchn_set_priority set_priority;

	set_priority.port = evtchn_from_irq(irq);
	set_priority.priority = priority;

	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
					   &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);

int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = irq_get_handler_data(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);
	if (irq == -1)
		goto done;

	info = irq_get_handler_data(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0)
		goto done;

	info->refcnt++;
	err = 0;
done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);

	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

static void __xen_evtchn_do_upcall(void)
{
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	int cpu = get_cpu();
	unsigned count;

	do {
		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

		xen_evtchn_handle_events(cpu);

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
#ifdef CONFIG_X86
	exit_idle();
	inc_irq_stat(irq_hv_callback_count);
#endif

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

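/*
 * A note on the nesting counter above (explanatory): if another upcall
 * arrives on this CPU while one is already in progress, the nested call
 * bumps xed_nesting_count and returns immediately. The outer invocation
 * then sees either a count != 1 or evtchn_upcall_pending set again and
 * loops to rescan the pending bitmap, so no event is lost.
 */
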
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	(void)xen_irq_info_evtchn_setup(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	bind_evtchn_to_cpu(evtchn, info->cpu);
	/* This will be deferred until interrupt is processed */
	irq_set_affinity(irq, cpumask_of(info->cpu));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	if (!xen_support_evtchn_rebind())
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	masked = test_and_set_mask(evtchn);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	if (!masked)
		unmask_evtchn(evtchn);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	if (unlikely(irqd_is_setaffinity_pending(data))) {
		int masked = test_and_set_mask(evtchn);

		clear_evtchn(evtchn);

		irq_move_masked_irq(data);

		if (!masked)
			unmask_evtchn(evtchn);
	} else
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq(data->irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	masked = test_and_set_mask(evtchn);
	set_evtchn(evtchn);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
				gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout. In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending. In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

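/*
 * Typical poll pattern (a sketch; this is how the PV spinlock code uses
 * these helpers): clear the pending state, re-check the wakeup
 * condition, then block in Xen until the otherwise-disabled irq's port
 * becomes pending:
 *
 *	xen_clear_irq_pending(irq);
 *	if (!condition)
 *		xen_poll_irq(irq);
 */
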
/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_mask_all();
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = 0;
			return;
		}
		pr_info("Xen HVM callback vector for event delivery is enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
			alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
					xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static bool fifo_events = true;
module_param(fifo_events, bool, 0);

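/*
 * Boot-time selection sketch: with MODULE_PARAM_PREFIX set to "xen.",
 * passing "xen.fifo_events=0" on the kernel command line forces the
 * 2-level event channel ABI; otherwise xen_init_IRQ() below tries the
 * FIFO ABI first and falls back to 2-level if the hypervisor lacks it.
 */
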
void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;

	if (fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0)
		xen_evtchn_2l_init();

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	xen_evtchn_mask_all();

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_callback_vector();

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		/* TODO: No PVH support for PIRQ EOI */
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}