1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Xen event channels 4 * 5 * Xen models interrupts with abstract event channels. Because each 6 * domain gets 1024 event channels, but NR_IRQ is not that large, we 7 * must dynamically map irqs<->event channels. The event channels 8 * interface with the rest of the kernel by defining a xen interrupt 9 * chip. When an event is received, it is mapped to an irq and sent 10 * through the normal interrupt processing path. 11 * 12 * There are four kinds of events which can be mapped to an event 13 * channel: 14 * 15 * 1. Inter-domain notifications. This includes all the virtual 16 * device events, since they're driven by front-ends in another domain 17 * (typically dom0). 18 * 2. VIRQs, typically used for timers. These are per-cpu events. 19 * 3. IPIs. 20 * 4. PIRQs - Hardware interrupts. 21 * 22 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 23 */ 24 25 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt 26 27 #include <linux/linkage.h> 28 #include <linux/interrupt.h> 29 #include <linux/irq.h> 30 #include <linux/moduleparam.h> 31 #include <linux/string.h> 32 #include <linux/memblock.h> 33 #include <linux/slab.h> 34 #include <linux/irqnr.h> 35 #include <linux/pci.h> 36 37 #ifdef CONFIG_X86 38 #include <asm/desc.h> 39 #include <asm/ptrace.h> 40 #include <asm/irq.h> 41 #include <asm/io_apic.h> 42 #include <asm/i8259.h> 43 #include <asm/xen/pci.h> 44 #endif 45 #include <asm/sync_bitops.h> 46 #include <asm/xen/hypercall.h> 47 #include <asm/xen/hypervisor.h> 48 #include <xen/page.h> 49 50 #include <xen/xen.h> 51 #include <xen/hvm.h> 52 #include <xen/xen-ops.h> 53 #include <xen/events.h> 54 #include <xen/interface/xen.h> 55 #include <xen/interface/event_channel.h> 56 #include <xen/interface/hvm/hvm_op.h> 57 #include <xen/interface/hvm/params.h> 58 #include <xen/interface/physdev.h> 59 #include <xen/interface/sched.h> 60 #include <xen/interface/vcpu.h> 61 #include <asm/hw_irq.h> 62 63 #include "events_internal.h" 64 65 const struct evtchn_ops *evtchn_ops; 66 67 /* 68 * This lock protects updates to the following mapping and reference-count 69 * arrays. The lock does not need to be acquired to read the mapping tables. 70 */ 71 static DEFINE_MUTEX(irq_mapping_update_lock); 72 73 static LIST_HEAD(xen_irq_list_head); 74 75 /* IRQ <-> VIRQ mapping. */ 76 static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; 77 78 /* IRQ <-> IPI mapping */ 79 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; 80 81 int **evtchn_to_irq; 82 #ifdef CONFIG_X86 83 static unsigned long *pirq_eoi_map; 84 #endif 85 static bool (*pirq_needs_eoi)(unsigned irq); 86 87 #define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq))) 88 #define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq))) 89 #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq)) 90 91 /* Xen will never allocate port zero for any purpose. 
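 *
 * Illustrative aside on the two-level table above (a worked example, assuming
 * 4 KiB pages and 4-byte ints): EVTCHN_PER_ROW is then PAGE_SIZE / sizeof(int)
 * = 1024, so event channel 2050 is resolved as
 *
 *	row = EVTCHN_ROW(2050) = 2050 / 1024 = 2
 *	col = EVTCHN_COL(2050) = 2050 % 1024 = 2
 *	irq = evtchn_to_irq[row][col]
 *
 * Each row is a single page, allocated on demand by set_evtchn_to_irq() and
 * filled with -1 for unbound ports.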
*/ 92 #define VALID_EVTCHN(chn) ((chn) != 0) 93 94 static struct irq_chip xen_dynamic_chip; 95 static struct irq_chip xen_percpu_chip; 96 static struct irq_chip xen_pirq_chip; 97 static void enable_dynirq(struct irq_data *data); 98 static void disable_dynirq(struct irq_data *data); 99 100 static void clear_evtchn_to_irq_row(unsigned row) 101 { 102 unsigned col; 103 104 for (col = 0; col < EVTCHN_PER_ROW; col++) 105 evtchn_to_irq[row][col] = -1; 106 } 107 108 static void clear_evtchn_to_irq_all(void) 109 { 110 unsigned row; 111 112 for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { 113 if (evtchn_to_irq[row] == NULL) 114 continue; 115 clear_evtchn_to_irq_row(row); 116 } 117 } 118 119 static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq) 120 { 121 unsigned row; 122 unsigned col; 123 124 if (evtchn >= xen_evtchn_max_channels()) 125 return -EINVAL; 126 127 row = EVTCHN_ROW(evtchn); 128 col = EVTCHN_COL(evtchn); 129 130 if (evtchn_to_irq[row] == NULL) { 131 /* Unallocated irq entries return -1 anyway */ 132 if (irq == -1) 133 return 0; 134 135 evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); 136 if (evtchn_to_irq[row] == NULL) 137 return -ENOMEM; 138 139 clear_evtchn_to_irq_row(row); 140 } 141 142 evtchn_to_irq[row][col] = irq; 143 return 0; 144 } 145 146 int get_evtchn_to_irq(evtchn_port_t evtchn) 147 { 148 if (evtchn >= xen_evtchn_max_channels()) 149 return -1; 150 if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) 151 return -1; 152 return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]; 153 } 154 155 /* Get info for IRQ */ 156 struct irq_info *info_for_irq(unsigned irq) 157 { 158 return irq_get_handler_data(irq); 159 } 160 161 /* Constructors for packed IRQ information. */ 162 static int xen_irq_info_common_setup(struct irq_info *info, 163 unsigned irq, 164 enum xen_irq_type type, 165 evtchn_port_t evtchn, 166 unsigned short cpu) 167 { 168 int ret; 169 170 BUG_ON(info->type != IRQT_UNBOUND && info->type != type); 171 172 info->type = type; 173 info->irq = irq; 174 info->evtchn = evtchn; 175 info->cpu = cpu; 176 177 ret = set_evtchn_to_irq(evtchn, irq); 178 if (ret < 0) 179 return ret; 180 181 irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); 182 183 return xen_evtchn_port_setup(info); 184 } 185 186 static int xen_irq_info_evtchn_setup(unsigned irq, 187 evtchn_port_t evtchn) 188 { 189 struct irq_info *info = info_for_irq(irq); 190 191 return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0); 192 } 193 194 static int xen_irq_info_ipi_setup(unsigned cpu, 195 unsigned irq, 196 evtchn_port_t evtchn, 197 enum ipi_vector ipi) 198 { 199 struct irq_info *info = info_for_irq(irq); 200 201 info->u.ipi = ipi; 202 203 per_cpu(ipi_to_irq, cpu)[ipi] = irq; 204 205 return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0); 206 } 207 208 static int xen_irq_info_virq_setup(unsigned cpu, 209 unsigned irq, 210 evtchn_port_t evtchn, 211 unsigned virq) 212 { 213 struct irq_info *info = info_for_irq(irq); 214 215 info->u.virq = virq; 216 217 per_cpu(virq_to_irq, cpu)[virq] = irq; 218 219 return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0); 220 } 221 222 static int xen_irq_info_pirq_setup(unsigned irq, 223 evtchn_port_t evtchn, 224 unsigned pirq, 225 unsigned gsi, 226 uint16_t domid, 227 unsigned char flags) 228 { 229 struct irq_info *info = info_for_irq(irq); 230 231 info->u.pirq.pirq = pirq; 232 info->u.pirq.gsi = gsi; 233 info->u.pirq.domid = domid; 234 info->u.pirq.flags = flags; 235 236 return xen_irq_info_common_setup(info, irq, 
IRQT_PIRQ, evtchn, 0); 237 } 238 239 static void xen_irq_info_cleanup(struct irq_info *info) 240 { 241 set_evtchn_to_irq(info->evtchn, -1); 242 info->evtchn = 0; 243 } 244 245 /* 246 * Accessors for packed IRQ information. 247 */ 248 evtchn_port_t evtchn_from_irq(unsigned irq) 249 { 250 if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)) 251 return 0; 252 253 return info_for_irq(irq)->evtchn; 254 } 255 256 unsigned int irq_from_evtchn(evtchn_port_t evtchn) 257 { 258 return get_evtchn_to_irq(evtchn); 259 } 260 EXPORT_SYMBOL_GPL(irq_from_evtchn); 261 262 int irq_from_virq(unsigned int cpu, unsigned int virq) 263 { 264 return per_cpu(virq_to_irq, cpu)[virq]; 265 } 266 267 static enum ipi_vector ipi_from_irq(unsigned irq) 268 { 269 struct irq_info *info = info_for_irq(irq); 270 271 BUG_ON(info == NULL); 272 BUG_ON(info->type != IRQT_IPI); 273 274 return info->u.ipi; 275 } 276 277 static unsigned virq_from_irq(unsigned irq) 278 { 279 struct irq_info *info = info_for_irq(irq); 280 281 BUG_ON(info == NULL); 282 BUG_ON(info->type != IRQT_VIRQ); 283 284 return info->u.virq; 285 } 286 287 static unsigned pirq_from_irq(unsigned irq) 288 { 289 struct irq_info *info = info_for_irq(irq); 290 291 BUG_ON(info == NULL); 292 BUG_ON(info->type != IRQT_PIRQ); 293 294 return info->u.pirq.pirq; 295 } 296 297 static enum xen_irq_type type_from_irq(unsigned irq) 298 { 299 return info_for_irq(irq)->type; 300 } 301 302 unsigned cpu_from_irq(unsigned irq) 303 { 304 return info_for_irq(irq)->cpu; 305 } 306 307 unsigned int cpu_from_evtchn(evtchn_port_t evtchn) 308 { 309 int irq = get_evtchn_to_irq(evtchn); 310 unsigned ret = 0; 311 312 if (irq != -1) 313 ret = cpu_from_irq(irq); 314 315 return ret; 316 } 317 318 #ifdef CONFIG_X86 319 static bool pirq_check_eoi_map(unsigned irq) 320 { 321 return test_bit(pirq_from_irq(irq), pirq_eoi_map); 322 } 323 #endif 324 325 static bool pirq_needs_eoi_flag(unsigned irq) 326 { 327 struct irq_info *info = info_for_irq(irq); 328 BUG_ON(info->type != IRQT_PIRQ); 329 330 return info->u.pirq.flags & PIRQ_NEEDS_EOI; 331 } 332 333 static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu) 334 { 335 int irq = get_evtchn_to_irq(evtchn); 336 struct irq_info *info = info_for_irq(irq); 337 338 BUG_ON(irq == -1); 339 #ifdef CONFIG_SMP 340 cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu)); 341 #endif 342 xen_evtchn_port_bind_to_cpu(info, cpu); 343 344 info->cpu = cpu; 345 } 346 347 /** 348 * notify_remote_via_irq - send event to remote end of event channel via irq 349 * @irq: irq of event channel to send event to 350 * 351 * Unlike notify_remote_via_evtchn(), this is safe to use across 352 * save/restore. Notifications on a broken connection are silently 353 * dropped. 354 */ 355 void notify_remote_via_irq(int irq) 356 { 357 evtchn_port_t evtchn = evtchn_from_irq(irq); 358 359 if (VALID_EVTCHN(evtchn)) 360 notify_remote_via_evtchn(evtchn); 361 } 362 EXPORT_SYMBOL_GPL(notify_remote_via_irq); 363 364 static void xen_irq_init(unsigned irq) 365 { 366 struct irq_info *info; 367 #ifdef CONFIG_SMP 368 /* By default all event channels notify CPU#0. 
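 *
 * (Aside: a minimal sketch of how a frontend driver typically uses the
 * notify_remote_via_irq() helper documented above, once requests have been
 * placed on a shared ring -- "ring" and "irq" are whatever the driver set up
 * via the ring macros and bind_evtchn_to_irqhandler():
 *
 *	int notify;
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 *
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() comes from xen/interface/io/ring.h.)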
*/ 369 cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0)); 370 #endif 371 372 info = kzalloc(sizeof(*info), GFP_KERNEL); 373 if (info == NULL) 374 panic("Unable to allocate metadata for IRQ%d\n", irq); 375 376 info->type = IRQT_UNBOUND; 377 info->refcnt = -1; 378 379 irq_set_handler_data(irq, info); 380 381 list_add_tail(&info->list, &xen_irq_list_head); 382 } 383 384 static int __must_check xen_allocate_irqs_dynamic(int nvec) 385 { 386 int i, irq = irq_alloc_descs(-1, 0, nvec, -1); 387 388 if (irq >= 0) { 389 for (i = 0; i < nvec; i++) 390 xen_irq_init(irq + i); 391 } 392 393 return irq; 394 } 395 396 static inline int __must_check xen_allocate_irq_dynamic(void) 397 { 398 399 return xen_allocate_irqs_dynamic(1); 400 } 401 402 static int __must_check xen_allocate_irq_gsi(unsigned gsi) 403 { 404 int irq; 405 406 /* 407 * A PV guest has no concept of a GSI (since it has no ACPI 408 * nor access to/knowledge of the physical APICs). Therefore 409 * all IRQs are dynamically allocated from the entire IRQ 410 * space. 411 */ 412 if (xen_pv_domain() && !xen_initial_domain()) 413 return xen_allocate_irq_dynamic(); 414 415 /* Legacy IRQ descriptors are already allocated by the arch. */ 416 if (gsi < nr_legacy_irqs()) 417 irq = gsi; 418 else 419 irq = irq_alloc_desc_at(gsi, -1); 420 421 xen_irq_init(irq); 422 423 return irq; 424 } 425 426 static void xen_free_irq(unsigned irq) 427 { 428 struct irq_info *info = irq_get_handler_data(irq); 429 430 if (WARN_ON(!info)) 431 return; 432 433 list_del(&info->list); 434 435 irq_set_handler_data(irq, NULL); 436 437 WARN_ON(info->refcnt > 0); 438 439 kfree(info); 440 441 /* Legacy IRQ descriptors are managed by the arch. */ 442 if (irq < nr_legacy_irqs()) 443 return; 444 445 irq_free_desc(irq); 446 } 447 448 static void xen_evtchn_close(evtchn_port_t port) 449 { 450 struct evtchn_close close; 451 452 close.port = port; 453 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 454 BUG(); 455 } 456 457 static void pirq_query_unmask(int irq) 458 { 459 struct physdev_irq_status_query irq_status; 460 struct irq_info *info = info_for_irq(irq); 461 462 BUG_ON(info->type != IRQT_PIRQ); 463 464 irq_status.irq = pirq_from_irq(irq); 465 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) 466 irq_status.flags = 0; 467 468 info->u.pirq.flags &= ~PIRQ_NEEDS_EOI; 469 if (irq_status.flags & XENIRQSTAT_needs_eoi) 470 info->u.pirq.flags |= PIRQ_NEEDS_EOI; 471 } 472 473 static void eoi_pirq(struct irq_data *data) 474 { 475 evtchn_port_t evtchn = evtchn_from_irq(data->irq); 476 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; 477 int rc = 0; 478 479 if (!VALID_EVTCHN(evtchn)) 480 return; 481 482 if (unlikely(irqd_is_setaffinity_pending(data)) && 483 likely(!irqd_irq_disabled(data))) { 484 int masked = test_and_set_mask(evtchn); 485 486 clear_evtchn(evtchn); 487 488 irq_move_masked_irq(data); 489 490 if (!masked) 491 unmask_evtchn(evtchn); 492 } else 493 clear_evtchn(evtchn); 494 495 if (pirq_needs_eoi(data->irq)) { 496 rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); 497 WARN_ON(rc); 498 } 499 } 500 501 static void mask_ack_pirq(struct irq_data *data) 502 { 503 disable_dynirq(data); 504 eoi_pirq(data); 505 } 506 507 static unsigned int __startup_pirq(unsigned int irq) 508 { 509 struct evtchn_bind_pirq bind_pirq; 510 struct irq_info *info = info_for_irq(irq); 511 evtchn_port_t evtchn = evtchn_from_irq(irq); 512 int rc; 513 514 BUG_ON(info->type != IRQT_PIRQ); 515 516 if (VALID_EVTCHN(evtchn)) 517 goto out; 518 519 bind_pirq.pirq = 
pirq_from_irq(irq); 520 /* NB. We are happy to share unless we are probing. */ 521 bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ? 522 BIND_PIRQ__WILL_SHARE : 0; 523 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq); 524 if (rc != 0) { 525 pr_warn("Failed to obtain physical IRQ %d\n", irq); 526 return 0; 527 } 528 evtchn = bind_pirq.port; 529 530 pirq_query_unmask(irq); 531 532 rc = set_evtchn_to_irq(evtchn, irq); 533 if (rc) 534 goto err; 535 536 info->evtchn = evtchn; 537 bind_evtchn_to_cpu(evtchn, 0); 538 539 rc = xen_evtchn_port_setup(info); 540 if (rc) 541 goto err; 542 543 out: 544 unmask_evtchn(evtchn); 545 eoi_pirq(irq_get_irq_data(irq)); 546 547 return 0; 548 549 err: 550 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); 551 xen_evtchn_close(evtchn); 552 return 0; 553 } 554 555 static unsigned int startup_pirq(struct irq_data *data) 556 { 557 return __startup_pirq(data->irq); 558 } 559 560 static void shutdown_pirq(struct irq_data *data) 561 { 562 unsigned int irq = data->irq; 563 struct irq_info *info = info_for_irq(irq); 564 evtchn_port_t evtchn = evtchn_from_irq(irq); 565 566 BUG_ON(info->type != IRQT_PIRQ); 567 568 if (!VALID_EVTCHN(evtchn)) 569 return; 570 571 mask_evtchn(evtchn); 572 xen_evtchn_close(evtchn); 573 xen_irq_info_cleanup(info); 574 } 575 576 static void enable_pirq(struct irq_data *data) 577 { 578 enable_dynirq(data); 579 } 580 581 static void disable_pirq(struct irq_data *data) 582 { 583 disable_dynirq(data); 584 } 585 586 int xen_irq_from_gsi(unsigned gsi) 587 { 588 struct irq_info *info; 589 590 list_for_each_entry(info, &xen_irq_list_head, list) { 591 if (info->type != IRQT_PIRQ) 592 continue; 593 594 if (info->u.pirq.gsi == gsi) 595 return info->irq; 596 } 597 598 return -1; 599 } 600 EXPORT_SYMBOL_GPL(xen_irq_from_gsi); 601 602 static void __unbind_from_irq(unsigned int irq) 603 { 604 evtchn_port_t evtchn = evtchn_from_irq(irq); 605 struct irq_info *info = irq_get_handler_data(irq); 606 607 if (info->refcnt > 0) { 608 info->refcnt--; 609 if (info->refcnt != 0) 610 return; 611 } 612 613 if (VALID_EVTCHN(evtchn)) { 614 unsigned int cpu = cpu_from_irq(irq); 615 616 xen_evtchn_close(evtchn); 617 618 switch (type_from_irq(irq)) { 619 case IRQT_VIRQ: 620 per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; 621 break; 622 case IRQT_IPI: 623 per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1; 624 break; 625 default: 626 break; 627 } 628 629 xen_irq_info_cleanup(info); 630 } 631 632 xen_free_irq(irq); 633 } 634 635 /* 636 * Do not make any assumptions regarding the relationship between the 637 * IRQ number returned here and the Xen pirq argument. 638 * 639 * Note: We don't assign an event channel until the irq actually started 640 * up. Return an existing irq if we've already got one for the gsi. 641 * 642 * Shareable implies level triggered, not shareable implies edge 643 * triggered here. 644 */ 645 int xen_bind_pirq_gsi_to_irq(unsigned gsi, 646 unsigned pirq, int shareable, char *name) 647 { 648 int irq = -1; 649 struct physdev_irq irq_op; 650 int ret; 651 652 mutex_lock(&irq_mapping_update_lock); 653 654 irq = xen_irq_from_gsi(gsi); 655 if (irq != -1) { 656 pr_info("%s: returning irq %d for gsi %u\n", 657 __func__, irq, gsi); 658 goto out; 659 } 660 661 irq = xen_allocate_irq_gsi(gsi); 662 if (irq < 0) 663 goto out; 664 665 irq_op.irq = irq; 666 irq_op.vector = 0; 667 668 /* Only the privileged domain can do this. 
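 *
 * (Aside: a rough sketch of the expected caller pattern for
 * xen_bind_pirq_gsi_to_irq(), with illustrative names -- the GSI is first
 * mapped to a pirq via PHYSDEVOP_map_pirq by the x86 glue, then:
 *
 *	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, "ioapic-level");
 *	if (irq >= 0)
 *		request_irq(irq, handler, ...) as usual;
 *
 * Level-triggered GSIs pass shareable != 0 and therefore get the fasteoi
 * handler selected below.)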
For non-priv, the pcifront 669 * driver provides a PCI bus that does the call to do exactly 670 * this in the priv domain. */ 671 if (xen_initial_domain() && 672 HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { 673 xen_free_irq(irq); 674 irq = -ENOSPC; 675 goto out; 676 } 677 678 ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF, 679 shareable ? PIRQ_SHAREABLE : 0); 680 if (ret < 0) { 681 __unbind_from_irq(irq); 682 irq = ret; 683 goto out; 684 } 685 686 pirq_query_unmask(irq); 687 /* We try to use the handler with the appropriate semantic for the 688 * type of interrupt: if the interrupt is an edge triggered 689 * interrupt we use handle_edge_irq. 690 * 691 * On the other hand if the interrupt is level triggered we use 692 * handle_fasteoi_irq like the native code does for this kind of 693 * interrupts. 694 * 695 * Depending on the Xen version, pirq_needs_eoi might return true 696 * not only for level triggered interrupts but for edge triggered 697 * interrupts too. In any case Xen always honors the eoi mechanism, 698 * not injecting any more pirqs of the same kind if the first one 699 * hasn't received an eoi yet. Therefore using the fasteoi handler 700 * is the right choice either way. 701 */ 702 if (shareable) 703 irq_set_chip_and_handler_name(irq, &xen_pirq_chip, 704 handle_fasteoi_irq, name); 705 else 706 irq_set_chip_and_handler_name(irq, &xen_pirq_chip, 707 handle_edge_irq, name); 708 709 out: 710 mutex_unlock(&irq_mapping_update_lock); 711 712 return irq; 713 } 714 715 #ifdef CONFIG_PCI_MSI 716 int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) 717 { 718 int rc; 719 struct physdev_get_free_pirq op_get_free_pirq; 720 721 op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI; 722 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); 723 724 WARN_ONCE(rc == -ENOSYS, 725 "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n"); 726 727 return rc ? -1 : op_get_free_pirq.pirq; 728 } 729 730 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, 731 int pirq, int nvec, const char *name, domid_t domid) 732 { 733 int i, irq, ret; 734 735 mutex_lock(&irq_mapping_update_lock); 736 737 irq = xen_allocate_irqs_dynamic(nvec); 738 if (irq < 0) 739 goto out; 740 741 for (i = 0; i < nvec; i++) { 742 irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name); 743 744 ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid, 745 i == 0 ? 0 : PIRQ_MSI_GROUP); 746 if (ret < 0) 747 goto error_irq; 748 } 749 750 ret = irq_set_msi_desc(irq, msidesc); 751 if (ret < 0) 752 goto error_irq; 753 out: 754 mutex_unlock(&irq_mapping_update_lock); 755 return irq; 756 error_irq: 757 while (nvec--) 758 __unbind_from_irq(irq + nvec); 759 mutex_unlock(&irq_mapping_update_lock); 760 return ret; 761 } 762 #endif 763 764 int xen_destroy_irq(int irq) 765 { 766 struct physdev_unmap_pirq unmap_irq; 767 struct irq_info *info = info_for_irq(irq); 768 int rc = -ENOENT; 769 770 mutex_lock(&irq_mapping_update_lock); 771 772 /* 773 * If trying to remove a vector in a MSI group different 774 * than the first one skip the PIRQ unmap unless this vector 775 * is the first one in the group. 
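 *
 * (Aside: a condensed sketch of the MSI setup path whose layout this
 * teardown rule mirrors -- illustrative, error handling omitted:
 *
 *	pirq = xen_allocate_pirq_msi(dev, msidesc);
 *	irq  = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, nvec,
 *					"msi-x", DOMID_SELF);
 *
 * Only the first vector of an nvec-sized group carries the real PIRQ
 * mapping; the others are flagged PIRQ_MSI_GROUP, which is why only the
 * first one is unmapped here.)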
776 */ 777 if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) { 778 unmap_irq.pirq = info->u.pirq.pirq; 779 unmap_irq.domid = info->u.pirq.domid; 780 rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); 781 /* If another domain quits without making the pci_disable_msix 782 * call, the Xen hypervisor takes care of freeing the PIRQs 783 * (free_domain_pirqs). 784 */ 785 if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)) 786 pr_info("domain %d does not have %d anymore\n", 787 info->u.pirq.domid, info->u.pirq.pirq); 788 else if (rc) { 789 pr_warn("unmap irq failed %d\n", rc); 790 goto out; 791 } 792 } 793 794 xen_free_irq(irq); 795 796 out: 797 mutex_unlock(&irq_mapping_update_lock); 798 return rc; 799 } 800 801 int xen_irq_from_pirq(unsigned pirq) 802 { 803 int irq; 804 805 struct irq_info *info; 806 807 mutex_lock(&irq_mapping_update_lock); 808 809 list_for_each_entry(info, &xen_irq_list_head, list) { 810 if (info->type != IRQT_PIRQ) 811 continue; 812 irq = info->irq; 813 if (info->u.pirq.pirq == pirq) 814 goto out; 815 } 816 irq = -1; 817 out: 818 mutex_unlock(&irq_mapping_update_lock); 819 820 return irq; 821 } 822 823 824 int xen_pirq_from_irq(unsigned irq) 825 { 826 return pirq_from_irq(irq); 827 } 828 EXPORT_SYMBOL_GPL(xen_pirq_from_irq); 829 830 int bind_evtchn_to_irq(evtchn_port_t evtchn) 831 { 832 int irq; 833 int ret; 834 835 if (evtchn >= xen_evtchn_max_channels()) 836 return -ENOMEM; 837 838 mutex_lock(&irq_mapping_update_lock); 839 840 irq = get_evtchn_to_irq(evtchn); 841 842 if (irq == -1) { 843 irq = xen_allocate_irq_dynamic(); 844 if (irq < 0) 845 goto out; 846 847 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, 848 handle_edge_irq, "event"); 849 850 ret = xen_irq_info_evtchn_setup(irq, evtchn); 851 if (ret < 0) { 852 __unbind_from_irq(irq); 853 irq = ret; 854 goto out; 855 } 856 /* New interdomain events are bound to VCPU 0. */ 857 bind_evtchn_to_cpu(evtchn, 0); 858 } else { 859 struct irq_info *info = info_for_irq(irq); 860 WARN_ON(info == NULL || info->type != IRQT_EVTCHN); 861 } 862 863 out: 864 mutex_unlock(&irq_mapping_update_lock); 865 866 return irq; 867 } 868 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); 869 870 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) 871 { 872 struct evtchn_bind_ipi bind_ipi; 873 evtchn_port_t evtchn; 874 int ret, irq; 875 876 mutex_lock(&irq_mapping_update_lock); 877 878 irq = per_cpu(ipi_to_irq, cpu)[ipi]; 879 880 if (irq == -1) { 881 irq = xen_allocate_irq_dynamic(); 882 if (irq < 0) 883 goto out; 884 885 irq_set_chip_and_handler_name(irq, &xen_percpu_chip, 886 handle_percpu_irq, "ipi"); 887 888 bind_ipi.vcpu = xen_vcpu_nr(cpu); 889 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, 890 &bind_ipi) != 0) 891 BUG(); 892 evtchn = bind_ipi.port; 893 894 ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); 895 if (ret < 0) { 896 __unbind_from_irq(irq); 897 irq = ret; 898 goto out; 899 } 900 bind_evtchn_to_cpu(evtchn, cpu); 901 } else { 902 struct irq_info *info = info_for_irq(irq); 903 WARN_ON(info == NULL || info->type != IRQT_IPI); 904 } 905 906 out: 907 mutex_unlock(&irq_mapping_update_lock); 908 return irq; 909 } 910 911 int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, 912 evtchn_port_t remote_port) 913 { 914 struct evtchn_bind_interdomain bind_interdomain; 915 int err; 916 917 bind_interdomain.remote_dom = remote_domain; 918 bind_interdomain.remote_port = remote_port; 919 920 err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, 921 &bind_interdomain); 922 923 return err ? 
: bind_evtchn_to_irq(bind_interdomain.local_port); 924 } 925 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq); 926 927 static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) 928 { 929 struct evtchn_status status; 930 evtchn_port_t port; 931 int rc = -ENOENT; 932 933 memset(&status, 0, sizeof(status)); 934 for (port = 0; port < xen_evtchn_max_channels(); port++) { 935 status.dom = DOMID_SELF; 936 status.port = port; 937 rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status); 938 if (rc < 0) 939 continue; 940 if (status.status != EVTCHNSTAT_virq) 941 continue; 942 if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) { 943 *evtchn = port; 944 break; 945 } 946 } 947 return rc; 948 } 949 950 /** 951 * xen_evtchn_nr_channels - number of usable event channel ports 952 * 953 * This may be less than the maximum supported by the current 954 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum 955 * supported. 956 */ 957 unsigned xen_evtchn_nr_channels(void) 958 { 959 return evtchn_ops->nr_channels(); 960 } 961 EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels); 962 963 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) 964 { 965 struct evtchn_bind_virq bind_virq; 966 evtchn_port_t evtchn = 0; 967 int irq, ret; 968 969 mutex_lock(&irq_mapping_update_lock); 970 971 irq = per_cpu(virq_to_irq, cpu)[virq]; 972 973 if (irq == -1) { 974 irq = xen_allocate_irq_dynamic(); 975 if (irq < 0) 976 goto out; 977 978 if (percpu) 979 irq_set_chip_and_handler_name(irq, &xen_percpu_chip, 980 handle_percpu_irq, "virq"); 981 else 982 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, 983 handle_edge_irq, "virq"); 984 985 bind_virq.virq = virq; 986 bind_virq.vcpu = xen_vcpu_nr(cpu); 987 ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, 988 &bind_virq); 989 if (ret == 0) 990 evtchn = bind_virq.port; 991 else { 992 if (ret == -EEXIST) 993 ret = find_virq(virq, cpu, &evtchn); 994 BUG_ON(ret < 0); 995 } 996 997 ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq); 998 if (ret < 0) { 999 __unbind_from_irq(irq); 1000 irq = ret; 1001 goto out; 1002 } 1003 1004 bind_evtchn_to_cpu(evtchn, cpu); 1005 } else { 1006 struct irq_info *info = info_for_irq(irq); 1007 WARN_ON(info == NULL || info->type != IRQT_VIRQ); 1008 } 1009 1010 out: 1011 mutex_unlock(&irq_mapping_update_lock); 1012 1013 return irq; 1014 } 1015 1016 static void unbind_from_irq(unsigned int irq) 1017 { 1018 mutex_lock(&irq_mapping_update_lock); 1019 __unbind_from_irq(irq); 1020 mutex_unlock(&irq_mapping_update_lock); 1021 } 1022 1023 int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, 1024 irq_handler_t handler, 1025 unsigned long irqflags, 1026 const char *devname, void *dev_id) 1027 { 1028 int irq, retval; 1029 1030 irq = bind_evtchn_to_irq(evtchn); 1031 if (irq < 0) 1032 return irq; 1033 retval = request_irq(irq, handler, irqflags, devname, dev_id); 1034 if (retval != 0) { 1035 unbind_from_irq(irq); 1036 return retval; 1037 } 1038 1039 return irq; 1040 } 1041 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); 1042 1043 int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, 1044 evtchn_port_t remote_port, 1045 irq_handler_t handler, 1046 unsigned long irqflags, 1047 const char *devname, 1048 void *dev_id) 1049 { 1050 int irq, retval; 1051 1052 irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port); 1053 if (irq < 0) 1054 return irq; 1055 1056 retval = request_irq(irq, handler, irqflags, devname, dev_id); 1057 if (retval != 0) { 1058 unbind_from_irq(irq); 1059 return retval; 
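/*
 * Aside: a minimal usage sketch for the bind_*_to_irqhandler() helpers in
 * this file (illustrative names, no error handling). A frontend that was
 * handed an event-channel port via xenstore would do roughly:
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_interrupt, 0,
 *					"my-frontend", my_dev);
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 *
 * where my_interrupt() is an ordinary irq_handler_t returning IRQ_HANDLED,
 * and the same dev_id must be passed to bind and unbind.
 */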
1060 } 1061 1062 return irq; 1063 } 1064 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler); 1065 1066 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, 1067 irq_handler_t handler, 1068 unsigned long irqflags, const char *devname, void *dev_id) 1069 { 1070 int irq, retval; 1071 1072 irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU); 1073 if (irq < 0) 1074 return irq; 1075 retval = request_irq(irq, handler, irqflags, devname, dev_id); 1076 if (retval != 0) { 1077 unbind_from_irq(irq); 1078 return retval; 1079 } 1080 1081 return irq; 1082 } 1083 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); 1084 1085 int bind_ipi_to_irqhandler(enum ipi_vector ipi, 1086 unsigned int cpu, 1087 irq_handler_t handler, 1088 unsigned long irqflags, 1089 const char *devname, 1090 void *dev_id) 1091 { 1092 int irq, retval; 1093 1094 irq = bind_ipi_to_irq(ipi, cpu); 1095 if (irq < 0) 1096 return irq; 1097 1098 irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME; 1099 retval = request_irq(irq, handler, irqflags, devname, dev_id); 1100 if (retval != 0) { 1101 unbind_from_irq(irq); 1102 return retval; 1103 } 1104 1105 return irq; 1106 } 1107 1108 void unbind_from_irqhandler(unsigned int irq, void *dev_id) 1109 { 1110 struct irq_info *info = irq_get_handler_data(irq); 1111 1112 if (WARN_ON(!info)) 1113 return; 1114 free_irq(irq, dev_id); 1115 unbind_from_irq(irq); 1116 } 1117 EXPORT_SYMBOL_GPL(unbind_from_irqhandler); 1118 1119 /** 1120 * xen_set_irq_priority() - set an event channel priority. 1121 * @irq:irq bound to an event channel. 1122 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN. 1123 */ 1124 int xen_set_irq_priority(unsigned irq, unsigned priority) 1125 { 1126 struct evtchn_set_priority set_priority; 1127 1128 set_priority.port = evtchn_from_irq(irq); 1129 set_priority.priority = priority; 1130 1131 return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority, 1132 &set_priority); 1133 } 1134 EXPORT_SYMBOL_GPL(xen_set_irq_priority); 1135 1136 int evtchn_make_refcounted(evtchn_port_t evtchn) 1137 { 1138 int irq = get_evtchn_to_irq(evtchn); 1139 struct irq_info *info; 1140 1141 if (irq == -1) 1142 return -ENOENT; 1143 1144 info = irq_get_handler_data(irq); 1145 1146 if (!info) 1147 return -ENOENT; 1148 1149 WARN_ON(info->refcnt != -1); 1150 1151 info->refcnt = 1; 1152 1153 return 0; 1154 } 1155 EXPORT_SYMBOL_GPL(evtchn_make_refcounted); 1156 1157 int evtchn_get(evtchn_port_t evtchn) 1158 { 1159 int irq; 1160 struct irq_info *info; 1161 int err = -ENOENT; 1162 1163 if (evtchn >= xen_evtchn_max_channels()) 1164 return -EINVAL; 1165 1166 mutex_lock(&irq_mapping_update_lock); 1167 1168 irq = get_evtchn_to_irq(evtchn); 1169 if (irq == -1) 1170 goto done; 1171 1172 info = irq_get_handler_data(irq); 1173 1174 if (!info) 1175 goto done; 1176 1177 err = -EINVAL; 1178 if (info->refcnt <= 0) 1179 goto done; 1180 1181 info->refcnt++; 1182 err = 0; 1183 done: 1184 mutex_unlock(&irq_mapping_update_lock); 1185 1186 return err; 1187 } 1188 EXPORT_SYMBOL_GPL(evtchn_get); 1189 1190 void evtchn_put(evtchn_port_t evtchn) 1191 { 1192 int irq = get_evtchn_to_irq(evtchn); 1193 if (WARN_ON(irq == -1)) 1194 return; 1195 unbind_from_irq(irq); 1196 } 1197 EXPORT_SYMBOL_GPL(evtchn_put); 1198 1199 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) 1200 { 1201 int irq; 1202 1203 #ifdef CONFIG_X86 1204 if (unlikely(vector == XEN_NMI_VECTOR)) { 1205 int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu), 1206 NULL); 1207 if (rc < 0) 1208 printk(KERN_WARNING 
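/*
 * Aside: a rough sketch of the refcounted-evtchn protocol implemented by
 * evtchn_make_refcounted(), evtchn_get() and evtchn_put() above
 * (illustrative flow only):
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, ...);
 *	evtchn_make_refcounted(evtchn);		refcnt goes from -1 to 1
 *	...
 *	evtchn_get(evtchn);			temporary extra reference
 *	...
 *	evtchn_put(evtchn);			last put unbinds the irq
 *
 * __unbind_from_irq() only tears the mapping down once the count drops to
 * zero.
 */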
"Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc); 1209 return; 1210 } 1211 #endif 1212 irq = per_cpu(ipi_to_irq, cpu)[vector]; 1213 BUG_ON(irq < 0); 1214 notify_remote_via_irq(irq); 1215 } 1216 1217 static void __xen_evtchn_do_upcall(void) 1218 { 1219 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); 1220 int cpu = smp_processor_id(); 1221 1222 do { 1223 vcpu_info->evtchn_upcall_pending = 0; 1224 1225 xen_evtchn_handle_events(cpu); 1226 1227 BUG_ON(!irqs_disabled()); 1228 1229 virt_rmb(); /* Hypervisor can set upcall pending. */ 1230 1231 } while (vcpu_info->evtchn_upcall_pending); 1232 } 1233 1234 void xen_evtchn_do_upcall(struct pt_regs *regs) 1235 { 1236 struct pt_regs *old_regs = set_irq_regs(regs); 1237 1238 irq_enter(); 1239 #ifdef CONFIG_X86 1240 inc_irq_stat(irq_hv_callback_count); 1241 #endif 1242 1243 __xen_evtchn_do_upcall(); 1244 1245 irq_exit(); 1246 set_irq_regs(old_regs); 1247 } 1248 1249 void xen_hvm_evtchn_do_upcall(void) 1250 { 1251 __xen_evtchn_do_upcall(); 1252 } 1253 EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall); 1254 1255 /* Rebind a new event channel to an existing irq. */ 1256 void rebind_evtchn_irq(evtchn_port_t evtchn, int irq) 1257 { 1258 struct irq_info *info = info_for_irq(irq); 1259 1260 if (WARN_ON(!info)) 1261 return; 1262 1263 /* Make sure the irq is masked, since the new event channel 1264 will also be masked. */ 1265 disable_irq(irq); 1266 1267 mutex_lock(&irq_mapping_update_lock); 1268 1269 /* After resume the irq<->evtchn mappings are all cleared out */ 1270 BUG_ON(get_evtchn_to_irq(evtchn) != -1); 1271 /* Expect irq to have been bound before, 1272 so there should be a proper type */ 1273 BUG_ON(info->type == IRQT_UNBOUND); 1274 1275 (void)xen_irq_info_evtchn_setup(irq, evtchn); 1276 1277 mutex_unlock(&irq_mapping_update_lock); 1278 1279 bind_evtchn_to_cpu(evtchn, info->cpu); 1280 /* This will be deferred until interrupt is processed */ 1281 irq_set_affinity(irq, cpumask_of(info->cpu)); 1282 1283 /* Unmask the event channel. */ 1284 enable_irq(irq); 1285 } 1286 1287 /* Rebind an evtchn so that it gets delivered to a specific cpu */ 1288 static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) 1289 { 1290 struct evtchn_bind_vcpu bind_vcpu; 1291 int masked; 1292 1293 if (!VALID_EVTCHN(evtchn)) 1294 return -1; 1295 1296 if (!xen_support_evtchn_rebind()) 1297 return -1; 1298 1299 /* Send future instances of this interrupt to other vcpu. */ 1300 bind_vcpu.port = evtchn; 1301 bind_vcpu.vcpu = xen_vcpu_nr(tcpu); 1302 1303 /* 1304 * Mask the event while changing the VCPU binding to prevent 1305 * it being delivered on an unexpected VCPU. 1306 */ 1307 masked = test_and_set_mask(evtchn); 1308 1309 /* 1310 * If this fails, it usually just indicates that we're dealing with a 1311 * virq or IPI channel, which don't actually need to be rebound. Ignore 1312 * it, but don't do the xenlinux-level rebind in that case. 1313 */ 1314 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) 1315 bind_evtchn_to_cpu(evtchn, tcpu); 1316 1317 if (!masked) 1318 unmask_evtchn(evtchn); 1319 1320 return 0; 1321 } 1322 1323 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, 1324 bool force) 1325 { 1326 unsigned tcpu = cpumask_first_and(dest, cpu_online_mask); 1327 int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu); 1328 1329 if (!ret) 1330 irq_data_update_effective_affinity(data, cpumask_of(tcpu)); 1331 1332 return ret; 1333 } 1334 1335 /* To be called with desc->lock held. 
*/ 1336 int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu) 1337 { 1338 struct irq_data *d = irq_desc_get_irq_data(desc); 1339 1340 return set_affinity_irq(d, cpumask_of(tcpu), false); 1341 } 1342 EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn); 1343 1344 static void enable_dynirq(struct irq_data *data) 1345 { 1346 evtchn_port_t evtchn = evtchn_from_irq(data->irq); 1347 1348 if (VALID_EVTCHN(evtchn)) 1349 unmask_evtchn(evtchn); 1350 } 1351 1352 static void disable_dynirq(struct irq_data *data) 1353 { 1354 evtchn_port_t evtchn = evtchn_from_irq(data->irq); 1355 1356 if (VALID_EVTCHN(evtchn)) 1357 mask_evtchn(evtchn); 1358 } 1359 1360 static void ack_dynirq(struct irq_data *data) 1361 { 1362 evtchn_port_t evtchn = evtchn_from_irq(data->irq); 1363 1364 if (!VALID_EVTCHN(evtchn)) 1365 return; 1366 1367 if (unlikely(irqd_is_setaffinity_pending(data)) && 1368 likely(!irqd_irq_disabled(data))) { 1369 int masked = test_and_set_mask(evtchn); 1370 1371 clear_evtchn(evtchn); 1372 1373 irq_move_masked_irq(data); 1374 1375 if (!masked) 1376 unmask_evtchn(evtchn); 1377 } else 1378 clear_evtchn(evtchn); 1379 } 1380 1381 static void mask_ack_dynirq(struct irq_data *data) 1382 { 1383 disable_dynirq(data); 1384 ack_dynirq(data); 1385 } 1386 1387 static int retrigger_dynirq(struct irq_data *data) 1388 { 1389 evtchn_port_t evtchn = evtchn_from_irq(data->irq); 1390 int masked; 1391 1392 if (!VALID_EVTCHN(evtchn)) 1393 return 0; 1394 1395 masked = test_and_set_mask(evtchn); 1396 set_evtchn(evtchn); 1397 if (!masked) 1398 unmask_evtchn(evtchn); 1399 1400 return 1; 1401 } 1402 1403 static void restore_pirqs(void) 1404 { 1405 int pirq, rc, irq, gsi; 1406 struct physdev_map_pirq map_irq; 1407 struct irq_info *info; 1408 1409 list_for_each_entry(info, &xen_irq_list_head, list) { 1410 if (info->type != IRQT_PIRQ) 1411 continue; 1412 1413 pirq = info->u.pirq.pirq; 1414 gsi = info->u.pirq.gsi; 1415 irq = info->irq; 1416 1417 /* save/restore of PT devices doesn't work, so at this point the 1418 * only devices present are GSI based emulated devices */ 1419 if (!gsi) 1420 continue; 1421 1422 map_irq.domid = DOMID_SELF; 1423 map_irq.type = MAP_PIRQ_TYPE_GSI; 1424 map_irq.index = gsi; 1425 map_irq.pirq = pirq; 1426 1427 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); 1428 if (rc) { 1429 pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n", 1430 gsi, irq, pirq, rc); 1431 xen_free_irq(irq); 1432 continue; 1433 } 1434 1435 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); 1436 1437 __startup_pirq(irq); 1438 } 1439 } 1440 1441 static void restore_cpu_virqs(unsigned int cpu) 1442 { 1443 struct evtchn_bind_virq bind_virq; 1444 evtchn_port_t evtchn; 1445 int virq, irq; 1446 1447 for (virq = 0; virq < NR_VIRQS; virq++) { 1448 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) 1449 continue; 1450 1451 BUG_ON(virq_from_irq(irq) != virq); 1452 1453 /* Get a new binding from Xen. */ 1454 bind_virq.virq = virq; 1455 bind_virq.vcpu = xen_vcpu_nr(cpu); 1456 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, 1457 &bind_virq) != 0) 1458 BUG(); 1459 evtchn = bind_virq.port; 1460 1461 /* Record the new mapping. 
*/ 1462 (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq); 1463 bind_evtchn_to_cpu(evtchn, cpu); 1464 } 1465 } 1466 1467 static void restore_cpu_ipis(unsigned int cpu) 1468 { 1469 struct evtchn_bind_ipi bind_ipi; 1470 evtchn_port_t evtchn; 1471 int ipi, irq; 1472 1473 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { 1474 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) 1475 continue; 1476 1477 BUG_ON(ipi_from_irq(irq) != ipi); 1478 1479 /* Get a new binding from Xen. */ 1480 bind_ipi.vcpu = xen_vcpu_nr(cpu); 1481 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, 1482 &bind_ipi) != 0) 1483 BUG(); 1484 evtchn = bind_ipi.port; 1485 1486 /* Record the new mapping. */ 1487 (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); 1488 bind_evtchn_to_cpu(evtchn, cpu); 1489 } 1490 } 1491 1492 /* Clear an irq's pending state, in preparation for polling on it */ 1493 void xen_clear_irq_pending(int irq) 1494 { 1495 evtchn_port_t evtchn = evtchn_from_irq(irq); 1496 1497 if (VALID_EVTCHN(evtchn)) 1498 clear_evtchn(evtchn); 1499 } 1500 EXPORT_SYMBOL(xen_clear_irq_pending); 1501 void xen_set_irq_pending(int irq) 1502 { 1503 evtchn_port_t evtchn = evtchn_from_irq(irq); 1504 1505 if (VALID_EVTCHN(evtchn)) 1506 set_evtchn(evtchn); 1507 } 1508 1509 bool xen_test_irq_pending(int irq) 1510 { 1511 evtchn_port_t evtchn = evtchn_from_irq(irq); 1512 bool ret = false; 1513 1514 if (VALID_EVTCHN(evtchn)) 1515 ret = test_evtchn(evtchn); 1516 1517 return ret; 1518 } 1519 1520 /* Poll waiting for an irq to become pending with timeout. In the usual case, 1521 * the irq will be disabled so it won't deliver an interrupt. */ 1522 void xen_poll_irq_timeout(int irq, u64 timeout) 1523 { 1524 evtchn_port_t evtchn = evtchn_from_irq(irq); 1525 1526 if (VALID_EVTCHN(evtchn)) { 1527 struct sched_poll poll; 1528 1529 poll.nr_ports = 1; 1530 poll.timeout = timeout; 1531 set_xen_guest_handle(poll.ports, &evtchn); 1532 1533 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0) 1534 BUG(); 1535 } 1536 } 1537 EXPORT_SYMBOL(xen_poll_irq_timeout); 1538 /* Poll waiting for an irq to become pending. In the usual case, the 1539 * irq will be disabled so it won't deliver an interrupt. */ 1540 void xen_poll_irq(int irq) 1541 { 1542 xen_poll_irq_timeout(irq, 0 /* no timeout */); 1543 } 1544 1545 /* Check whether the IRQ line is shared with other guests. */ 1546 int xen_test_irq_shared(int irq) 1547 { 1548 struct irq_info *info = info_for_irq(irq); 1549 struct physdev_irq_status_query irq_status; 1550 1551 if (WARN_ON(!info)) 1552 return -ENOENT; 1553 1554 irq_status.irq = info->u.pirq.pirq; 1555 1556 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) 1557 return 0; 1558 return !(irq_status.flags & XENIRQSTAT_shared); 1559 } 1560 EXPORT_SYMBOL_GPL(xen_test_irq_shared); 1561 1562 void xen_irq_resume(void) 1563 { 1564 unsigned int cpu; 1565 struct irq_info *info; 1566 1567 /* New event-channel space is not 'live' yet. */ 1568 xen_evtchn_resume(); 1569 1570 /* No IRQ <-> event-channel mappings. 
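 *
 * (Aside: the clear-pending/poll helpers a little earlier are intended to
 * be used as a pair; a minimal sketch of the spin-wait idiom, with a
 * hypothetical want_to_stop() condition:
 *
 *	xen_clear_irq_pending(irq);
 *	while (!want_to_stop()) {
 *		xen_poll_irq(irq);		blocks until irq pends
 *		xen_clear_irq_pending(irq);
 *	}
 *
 * This is essentially the pattern the Xen PV spinlock slow path uses while
 * waiting for a kick.)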
*/ 1571 list_for_each_entry(info, &xen_irq_list_head, list) 1572 info->evtchn = 0; /* zap event-channel binding */ 1573 1574 clear_evtchn_to_irq_all(); 1575 1576 for_each_possible_cpu(cpu) { 1577 restore_cpu_virqs(cpu); 1578 restore_cpu_ipis(cpu); 1579 } 1580 1581 restore_pirqs(); 1582 } 1583 1584 static struct irq_chip xen_dynamic_chip __read_mostly = { 1585 .name = "xen-dyn", 1586 1587 .irq_disable = disable_dynirq, 1588 .irq_mask = disable_dynirq, 1589 .irq_unmask = enable_dynirq, 1590 1591 .irq_ack = ack_dynirq, 1592 .irq_mask_ack = mask_ack_dynirq, 1593 1594 .irq_set_affinity = set_affinity_irq, 1595 .irq_retrigger = retrigger_dynirq, 1596 }; 1597 1598 static struct irq_chip xen_pirq_chip __read_mostly = { 1599 .name = "xen-pirq", 1600 1601 .irq_startup = startup_pirq, 1602 .irq_shutdown = shutdown_pirq, 1603 .irq_enable = enable_pirq, 1604 .irq_disable = disable_pirq, 1605 1606 .irq_mask = disable_dynirq, 1607 .irq_unmask = enable_dynirq, 1608 1609 .irq_ack = eoi_pirq, 1610 .irq_eoi = eoi_pirq, 1611 .irq_mask_ack = mask_ack_pirq, 1612 1613 .irq_set_affinity = set_affinity_irq, 1614 1615 .irq_retrigger = retrigger_dynirq, 1616 }; 1617 1618 static struct irq_chip xen_percpu_chip __read_mostly = { 1619 .name = "xen-percpu", 1620 1621 .irq_disable = disable_dynirq, 1622 .irq_mask = disable_dynirq, 1623 .irq_unmask = enable_dynirq, 1624 1625 .irq_ack = ack_dynirq, 1626 }; 1627 1628 int xen_set_callback_via(uint64_t via) 1629 { 1630 struct xen_hvm_param a; 1631 a.domid = DOMID_SELF; 1632 a.index = HVM_PARAM_CALLBACK_IRQ; 1633 a.value = via; 1634 return HYPERVISOR_hvm_op(HVMOP_set_param, &a); 1635 } 1636 EXPORT_SYMBOL_GPL(xen_set_callback_via); 1637 1638 #ifdef CONFIG_XEN_PVHVM 1639 /* Vector callbacks are better than PCI interrupts to receive event 1640 * channel notifications because we can receive vector callbacks on any 1641 * vcpu and we don't need PCI support or APIC interactions. */ 1642 void xen_callback_vector(void) 1643 { 1644 int rc; 1645 uint64_t callback_via; 1646 1647 if (xen_have_vector_callback) { 1648 callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR); 1649 rc = xen_set_callback_via(callback_via); 1650 if (rc) { 1651 pr_err("Request for Xen HVM callback vector failed\n"); 1652 xen_have_vector_callback = 0; 1653 return; 1654 } 1655 pr_info_once("Xen HVM callback vector for event delivery is enabled\n"); 1656 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, 1657 xen_hvm_callback_vector); 1658 } 1659 } 1660 #else 1661 void xen_callback_vector(void) {} 1662 #endif 1663 1664 #undef MODULE_PARAM_PREFIX 1665 #define MODULE_PARAM_PREFIX "xen." 1666 1667 static bool fifo_events = true; 1668 module_param(fifo_events, bool, 0); 1669 1670 void __init xen_init_IRQ(void) 1671 { 1672 int ret = -EINVAL; 1673 evtchn_port_t evtchn; 1674 1675 if (fifo_events) 1676 ret = xen_evtchn_fifo_init(); 1677 if (ret < 0) 1678 xen_evtchn_2l_init(); 1679 1680 evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()), 1681 sizeof(*evtchn_to_irq), GFP_KERNEL); 1682 BUG_ON(!evtchn_to_irq); 1683 1684 /* No event channels are 'live' right now. 
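 *
 * (Aside: the fifo_events parameter above lives under the "xen." prefix, so
 * booting with
 *
 *	xen.fifo_events=0
 *
 * on the kernel command line skips xen_evtchn_fifo_init() and selects the
 * 2-level ABI via xen_evtchn_2l_init(); the 2-level code is also the
 * fallback when the hypervisor does not support the FIFO ABI.)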
 */
	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_callback_vector();

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}
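/*
 * Aside: a condensed sketch of how the IPI plumbing above is used by the
 * Xen SMP code (illustrative names, per-CPU setup/teardown omitted):
 *
 *	irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
 *				     xen_reschedule_interrupt,
 *				     IRQF_PERCPU, "resched", NULL);
 *
 * and a sender then targets one CPU with:
 *
 *	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 *
 * which looks the per-cpu irq up in ipi_to_irq[] and notifies the bound
 * event channel.
 */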