/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)
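
/*
 * Illustrative worked example (not used by the driver): assuming 4 KiB
 * pages and 4-byte ints, EVTCHN_PER_ROW is 1024, so an event channel
 * port maps into the two-level evtchn_to_irq table as:
 *
 *	port 1500 -> evtchn_to_irq[1][476]
 *	port   37 -> evtchn_to_irq[0][37]
 *
 * Rows are allocated lazily in set_evtchn_to_irq(); ports whose row has
 * not been allocated read back as -1 (unbound) via get_evtchn_to_irq().
 */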

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static void clear_evtchn_to_irq_row(unsigned row)
{
	unsigned col;

	for (col = 0; col < EVTCHN_PER_ROW; col++)
		evtchn_to_irq[row][col] = -1;
}

static void clear_evtchn_to_irq_all(void)
{
	unsigned row;

	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
		if (evtchn_to_irq[row] == NULL)
			continue;
		clear_evtchn_to_irq_row(row);
	}
}

static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
{
	unsigned row;
	unsigned col;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	row = EVTCHN_ROW(evtchn);
	col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;

		evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
		if (evtchn_to_irq[row] == NULL)
			return -ENOMEM;

		clear_evtchn_to_irq_row(row);
	}

	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
	return 0;
}

int get_evtchn_to_irq(unsigned evtchn)
{
	if (evtchn >= xen_evtchn_max_channels())
		return -1;
	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
		return -1;
	return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
}

/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned evtchn,
				     unsigned short cpu)
{
	int ret;

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
		return ret;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

	return xen_evtchn_port_setup(info);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
				     unsigned evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  unsigned evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   unsigned evtchn,
				   unsigned virq)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}

static int xen_irq_info_pirq_setup(unsigned irq,
				   unsigned evtchn,
				   unsigned pirq,
				   unsigned gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	info->evtchn = 0;
}

/*
 * Accessors for packed IRQ information.
 */
unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = get_evtchn_to_irq(chn);
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
#endif
	xen_evtchn_port_bind_to_cpu(info, cpu);

	info->cpu = cpu;
}

static void xen_evtchn_mask_all(void)
{
	unsigned int evtchn;

	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
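
/*
 * Minimal usage sketch (illustrative, not part of the driver): a
 * frontend that has bound an event channel to an irq kicks the remote
 * end after publishing requests on its shared ring.  The irq is assumed
 * to come from bind_evtchn_to_irqhandler() (defined below) or a similar
 * helper; example_kick_backend() is a made-up name.
 *
 *	static void example_kick_backend(int irq)
 *	{
 *		notify_remote_via_irq(irq);
 *	}
 */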

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	/* By default all event channels notify CPU#0. */
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);

	if (irq >= 0) {
		for (i = 0; i < nvec; i++)
			xen_irq_init(irq + i);
	}

	return irq;
}

static inline int __must_check xen_allocate_irq_dynamic(void)
{
	return xen_allocate_irqs_dynamic(1);
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < nr_legacy_irqs())
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	WARN_ON(info->refcnt > 0);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < nr_legacy_irqs())
		return;

	irq_free_desc(irq);
}

static void xen_evtchn_close(unsigned int port)
{
	struct evtchn_close close;

	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		pr_warn("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	rc = set_evtchn_to_irq(evtchn, irq);
	if (rc)
		goto err;

	info->evtchn = evtchn;
	bind_evtchn_to_cpu(evtchn, 0);

	rc = xen_evtchn_port_setup(info);
	if (rc)
		goto err;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;

err:
	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
	xen_evtchn_close(evtchn);
	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	unsigned evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

static void __unbind_from_irq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = cpu_from_irq(irq);

		xen_evtchn_close(evtchn);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		xen_irq_info_cleanup(info);
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq is actually
 * started up.  Return an existing irq if we've already got one for
 * the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
				      shareable ? PIRQ_SHAREABLE : 0);
	if (ret < 0) {
		__unbind_from_irq(irq);
		irq = ret;
		goto out;
	}

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
					      handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
					      handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
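
/*
 * Hedged usage sketch (illustrative, not part of the driver): a
 * privileged domain that discovers a level-triggered (and therefore
 * shareable) GSI during PCI setup could bind it roughly like this.
 * example_setup_gsi() and the "ioapic-level" name are assumptions made
 * for the sake of the example.
 *
 *	static int example_setup_gsi(unsigned gsi, unsigned pirq)
 *	{
 *		int shareable = 1;
 *
 *		return xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable,
 *						"ioapic-level");
 *	}
 */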

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid)
{
	int i, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irqs_dynamic(nvec);
	if (irq < 0)
		goto out;

	for (i = 0; i < nvec; i++) {
		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip,
					      handle_edge_irq, name);

		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
					      i == 0 ? 0 : PIRQ_MSI_GROUP);
		if (ret < 0)
			goto error_irq;
	}

	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	for (; i >= 0; i--)
		__unbind_from_irq(irq + i);
	mutex_unlock(&irq_mapping_update_lock);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	/*
	 * Only the first vector of an MSI group carries the PIRQ mapping
	 * (the others are flagged PIRQ_MSI_GROUP), so skip the PIRQ unmap
	 * when removing any vector other than the first one.
	 */
	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			pr_info("domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;
	int ret;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		ret = xen_irq_info_evtchn_setup(irq, evtchn);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/* New interdomain events are bound to VCPU 0. */
		bind_evtchn_to_cpu(evtchn, 0);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
				   unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);

static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port < xen_evtchn_max_channels(); port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported.
 */
unsigned xen_evtchn_nr_channels(void)
{
	return evtchn_ops->nr_channels();
}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		if (percpu)
			irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
						      handle_percpu_irq, "virq");
		else
			irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
						      handle_edge_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						  &bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
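
/*
 * Minimal sketch of the backend-style binding path (illustrative, not
 * part of the driver): a backend that has learned a frontend's domid
 * and event-channel port binds a handler to it and later tears it down
 * with unbind_from_irqhandler().  example_connect() and
 * example_handler() are made-up names.
 *
 *	static irqreturn_t example_handler(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int example_connect(domid_t otherend, unsigned int port,
 *				   void *dev)
 *	{
 *		return bind_interdomain_evtchn_to_irqhandler(otherend, port,
 *							     example_handler,
 *							     0, "example", dev);
 *	}
 */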

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname,
			    void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
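
/*
 * Minimal sketch (illustrative, not part of the driver): binding a
 * per-cpu VIRQ such as VIRQ_DEBUG to a handler.  example_virq_handler()
 * and example_bind_debug_virq() are made-up names; passing IRQF_PERCPU
 * is what makes bind_virq_to_irq() select the per-cpu chip.
 *
 *	static irqreturn_t example_virq_handler(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int example_bind_debug_virq(unsigned int cpu)
 *	{
 *		return bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
 *					       example_virq_handler,
 *					       IRQF_PERCPU, "debug", NULL);
 *	}
 */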

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq: irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
	struct evtchn_set_priority set_priority;

	set_priority.port = evtchn_from_irq(irq);
	set_priority.priority = priority;

	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
					   &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);

int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = irq_get_handler_data(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);
	if (irq == -1)
		goto done;

	info = irq_get_handler_data(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0)
		goto done;

	info->refcnt++;
	err = 0;
done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n",
			       cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

static void __xen_evtchn_do_upcall(void)
{
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	int cpu = get_cpu();
	unsigned count;

	do {
		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

		xen_evtchn_handle_events(cpu);

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
#ifdef CONFIG_X86
	exit_idle();
	inc_irq_stat(irq_hv_callback_count);
#endif

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	(void)xen_irq_info_evtchn_setup(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	bind_evtchn_to_cpu(evtchn, info->cpu);
	/* This will be deferred until interrupt is processed */
	irq_set_affinity(irq, cpumask_of(info->cpu));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	if (!xen_support_evtchn_rebind())
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	masked = test_and_set_mask(evtchn);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	if (!masked)
		unmask_evtchn(evtchn);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq(data->irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	masked = test_and_set_mask(evtchn);
	set_evtchn(evtchn);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
				gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
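
/*
 * Illustrative polling pattern (not part of the driver): a spinning
 * waiter clears the pending state, re-checks its condition, and then
 * blocks in the hypervisor until the event fires.  example_wait() and
 * the done() callback are assumptions for the sake of the sketch.
 *
 *	static void example_wait(int irq, bool (*done)(void *), void *arg)
 *	{
 *		while (!done(arg)) {
 *			xen_clear_irq_pending(irq);
 *			if (done(arg))
 *				break;
 *			xen_poll_irq(irq);
 *		}
 *	}
 */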

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_mask_all();
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = 0;
			return;
		}
		pr_info("Xen HVM callback vector for event delivery is enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
			alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
					xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static bool fifo_events = true;
module_param(fifo_events, bool, 0);
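
/*
 * Usage note (not from the original source): because MODULE_PARAM_PREFIX
 * is "xen.", the parameter above is exposed as "xen.fifo_events".
 * Booting with:
 *
 *	xen.fifo_events=0
 *
 * skips xen_evtchn_fifo_init() in xen_init_IRQ() below and falls back
 * to the 2-level event channel ABI.
 */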

void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;

	if (fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0)
		xen_evtchn_2l_init();

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	xen_evtchn_mask_all();

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_callback_vector();

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		/* TODO: No PVH support for PIRQ EOI */
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}