Lines Matching full:irq

9  * chip.  When an event is received, it is mapped to an irq and sent
29 #include <linux/irq.h>
46 #include <asm/irq.h>
86 * Packed IRQ information:
88 * event channel - irq->event channel mapping
91 * PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
92 * guest, or GSI (real passthrough IRQ) of the device.
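The comment block above (lines 86-92) describes how per-IRQ bookkeeping is packed into a single structure. As a rough, illustrative sketch only (the names below are placeholders, not the kernel's actual struct irq_info, which carries additional state such as refcounts, lateeoi bookkeeping and RCU work), the packed information amounts to:

        /* Illustrative only: condensed view of the per-IRQ bookkeeping. */
        typedef unsigned int evtchn_port_sketch_t;   /* stand-in for evtchn_port_t */

        enum xen_irq_type_sketch { SK_UNBOUND, SK_PIRQ, SK_VIRQ, SK_IPI, SK_EVTCHN };

        struct irq_info_sketch {
                enum xen_irq_type_sketch type;   /* what the irq is bound to */
                unsigned int irq;                /* Linux irq number */
                evtchn_port_sketch_t evtchn;     /* irq <-> event channel mapping */
                unsigned short cpu;              /* cpu the channel is bound to */
                union {                          /* type-specific index */
                        unsigned short virq;     /* VIRQ number */
                        unsigned short ipi;      /* IPI vector */
                        struct {
                                unsigned short pirq;  /* physical irq */
                                unsigned short gsi;   /* GSI of a passthrough device */
                                unsigned char flags;  /* e.g. "needs EOI" */
                        } pirq;
                } u;
        };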
110 unsigned irq; member
155 * IRQ-desc lock
162 /* IRQ <-> VIRQ mapping. */
165 /* IRQ <-> IPI mapping */
177 static bool (*pirq_needs_eoi)(unsigned irq);
216 static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq) in set_evtchn_to_irq() argument
229 /* Unallocated irq entries return -1 anyway */ in set_evtchn_to_irq()
230 if (irq == -1) in set_evtchn_to_irq()
247 WRITE_ONCE(evtchn_to_irq[row][col], irq); in set_evtchn_to_irq()
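set_evtchn_to_irq() above maintains a two-level (row/column) table so that event-channel-to-irq lookups are just a couple of array dereferences, with WRITE_ONCE()/READ_ONCE() keeping them safe for lock-free readers. A self-contained sketch of the same scheme, with illustrative sizes (the kernel derives the row width from PAGE_SIZE):

        #define SK_EVTCHN_PER_ROW 1024     /* illustrative; kernel uses PAGE_SIZE / sizeof(int) */
        #define SK_EVTCHN_ROWS    64
        #define SK_EVTCHN_ROW(e)  ((e) / SK_EVTCHN_PER_ROW)
        #define SK_EVTCHN_COL(e)  ((e) % SK_EVTCHN_PER_ROW)

        static int *sk_evtchn_to_irq[SK_EVTCHN_ROWS];   /* rows allocated on demand */

        static int sk_lookup_irq(unsigned int evtchn)
        {
                int *row = sk_evtchn_to_irq[SK_EVTCHN_ROW(evtchn)];

                /* A missing row means no irq was ever bound in that range. */
                if (!row)
                        return -1;
                /* The kernel wraps this access in READ_ONCE(). */
                return row[SK_EVTCHN_COL(evtchn)];
        }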
251 /* Get info for IRQ */
252 static struct irq_info *info_for_irq(unsigned irq) in info_for_irq() argument
254 if (irq < nr_legacy_irqs()) in info_for_irq()
255 return legacy_info_ptrs[irq]; in info_for_irq()
257 return irq_get_chip_data(irq); in info_for_irq()
260 static void set_info_for_irq(unsigned int irq, struct irq_info *info) in set_info_for_irq() argument
262 if (irq < nr_legacy_irqs()) in set_info_for_irq()
263 legacy_info_ptrs[irq] = info; in set_info_for_irq()
265 irq_set_chip_data(irq, info); in set_info_for_irq()
270 int irq; in evtchn_to_info() local
276 irq = READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]); in evtchn_to_info()
278 return (irq < 0) ? NULL : info_for_irq(irq); in evtchn_to_info()
307 static void xen_irq_free_desc(unsigned int irq) in xen_irq_free_desc() argument
309 /* Legacy IRQ descriptors are managed by the arch. */ in xen_irq_free_desc()
310 if (irq >= nr_legacy_irqs()) in xen_irq_free_desc()
311 irq_free_desc(irq); in xen_irq_free_desc()
318 unsigned int irq = info->irq; in delayed_free_irq() local
321 set_info_for_irq(irq, NULL); in delayed_free_irq()
325 xen_irq_free_desc(irq); in delayed_free_irq()
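delayed_free_irq() above unpublishes the irq <-> info association and only then releases the memory and the descriptor; the work itself is deferred past an RCU grace period via queue_rcu_work() (see the upcall comment near line 1715) so a concurrent event-handling path can never dereference freed info. A hedged sketch of that deferral pattern, with my_info/my_free_cb as illustrative names:

        #include <linux/workqueue.h>
        #include <linux/slab.h>

        struct my_info {
                unsigned int irq;
                struct rcu_work rwork;
        };

        static void my_free_cb(struct work_struct *work)
        {
                struct my_info *info =
                        container_of(to_rcu_work(work), struct my_info, rwork);

                kfree(info);    /* safe: a full RCU grace period has elapsed */
        }

        static void my_release(struct my_info *info)
        {
                /* Queue the free so it only runs once all RCU readers that
                 * might still hold a pointer to info have finished. */
                INIT_RCU_WORK(&info->rwork, my_free_cb);
                queue_rcu_work(system_wq, &info->rwork);
        }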
328 /* Constructors for packed IRQ information. */
344 ret = set_evtchn_to_irq(evtchn, info->irq); in xen_irq_info_common_setup()
348 irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN); in xen_irq_info_common_setup()
372 per_cpu(ipi_to_irq, cpu)[ipi] = info->irq; in xen_irq_info_ipi_setup()
383 per_cpu(virq_to_irq, cpu)[virq] = info->irq; in xen_irq_info_virq_setup()
409 * Accessors for packed IRQ information.
411 static evtchn_port_t evtchn_from_irq(unsigned int irq) in evtchn_from_irq() argument
415 if (likely(irq < nr_irqs)) in evtchn_from_irq()
416 info = info_for_irq(irq); in evtchn_from_irq()
427 return info ? info->irq : -1; in irq_from_evtchn()
434 int irq = per_cpu(virq_to_irq, cpu)[virq]; in irq_evtchn_from_virq() local
436 *evtchn = evtchn_from_irq(irq); in irq_evtchn_from_virq()
438 return irq; in irq_evtchn_from_virq()
457 static unsigned pirq_from_irq(unsigned irq) in pirq_from_irq() argument
459 struct irq_info *info = info_for_irq(irq); in pirq_from_irq()
503 static bool pirq_check_eoi_map(unsigned irq) in pirq_check_eoi_map() argument
505 return test_bit(pirq_from_irq(irq), pirq_eoi_map); in pirq_check_eoi_map()
509 static bool pirq_needs_eoi_flag(unsigned irq) in pirq_needs_eoi_flag() argument
511 struct irq_info *info = info_for_irq(irq); in pirq_needs_eoi_flag()
521 struct irq_data *data = irq_get_irq_data(info->irq); in bind_evtchn_to_cpu()
535 * notify_remote_via_irq - send event to remote end of event channel via irq
536 * @irq: irq of event channel to send event to
542 void notify_remote_via_irq(int irq) in notify_remote_via_irq() argument
544 evtchn_port_t evtchn = evtchn_from_irq(irq); in notify_remote_via_irq()
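notify_remote_via_irq() above is the send side of an already-bound event channel. A typical driver-side pattern, shown here as a hedged example (my_evtchn_handler/my_bind are illustrative names; evtchn would normally come from XenBus or a hypercall):

        #include <linux/interrupt.h>
        #include <xen/events.h>

        static irqreturn_t my_evtchn_handler(int irq, void *dev_id)
        {
                /* dev_id is whatever was passed at bind time. */
                return IRQ_HANDLED;
        }

        static int my_bind(evtchn_port_t evtchn, void *dev)
        {
                int irq = bind_evtchn_to_irqhandler(evtchn, my_evtchn_handler,
                                                    0, "my-evtchn", dev);
                if (irq < 0)
                        return irq;

                notify_remote_via_irq(irq);     /* kick the other end once */
                return irq;
        }

        /* Teardown: unbind_from_irqhandler() frees the handler and closes the
         * binding, so the driver must not also call free_irq() itself. */
        static void my_unbind(int irq, void *dev)
        {
                unbind_from_irqhandler(irq, dev);
        }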
702 void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags) in xen_irq_lateeoi() argument
708 info = info_for_irq(irq); in xen_irq_lateeoi()
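xen_irq_lateeoi() above belongs to the "lateeoi" binding variants used by backend drivers: the event channel is not serviced again until the handler explicitly signals completion. A hedged sketch of such a handler (the ring-processing step is only indicated by a comment):

        #include <linux/interrupt.h>
        #include <xen/events.h>

        static irqreturn_t my_lateeoi_handler(int irq, void *dev_id)
        {
                bool did_work = false;

                /* ... consume requests from the shared ring here, setting
                 * did_work when anything was actually pending ... */

                /* Signal completion; flagging spurious wakeups lets the core
                 * throttle a misbehaving other end. */
                xen_irq_lateeoi(irq, did_work ? 0 : XEN_EOI_FLAG_SPURIOUS);
                return IRQ_HANDLED;
        }

        /* Bound with the _lateeoi variant instead of the plain one, e.g.:
         *   irq = bind_evtchn_to_irqhandler_lateeoi(evtchn, my_lateeoi_handler,
         *                                           0, "my-backend", dev);
         */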
717 static struct irq_info *xen_irq_init(unsigned int irq) in xen_irq_init() argument
723 info->irq = irq; in xen_irq_init()
728 set_info_for_irq(irq, info); in xen_irq_init()
733 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); in xen_irq_init()
744 int irq = irq_alloc_desc_from(0, -1); in xen_allocate_irq_dynamic() local
747 if (irq >= 0) { in xen_allocate_irq_dynamic()
748 info = xen_irq_init(irq); in xen_allocate_irq_dynamic()
750 xen_irq_free_desc(irq); in xen_allocate_irq_dynamic()
758 int irq; in xen_allocate_irq_gsi() local
764 * all IRQs are dynamically allocated from the entire IRQ in xen_allocate_irq_gsi()
770 /* Legacy IRQ descriptors are already allocated by the arch. */ in xen_allocate_irq_gsi()
772 irq = gsi; in xen_allocate_irq_gsi()
774 irq = irq_alloc_desc_at(gsi, -1); in xen_allocate_irq_gsi()
776 info = xen_irq_init(irq); in xen_allocate_irq_gsi()
778 xen_irq_free_desc(irq); in xen_allocate_irq_gsi()
805 static void pirq_query_unmask(int irq) in pirq_query_unmask() argument
808 struct irq_info *info = info_for_irq(irq); in pirq_query_unmask()
812 irq_status.irq = pirq_from_irq(irq); in pirq_query_unmask()
823 struct irq_info *info = info_for_irq(data->irq); in eoi_pirq()
825 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; in eoi_pirq()
833 if (pirq_needs_eoi(data->irq)) { in eoi_pirq()
845 static unsigned int __startup_pirq(unsigned int irq) in __startup_pirq() argument
848 struct irq_info *info = info_for_irq(irq); in __startup_pirq()
849 evtchn_port_t evtchn = evtchn_from_irq(irq); in __startup_pirq()
857 bind_pirq.pirq = pirq_from_irq(irq); in __startup_pirq()
863 pr_warn("Failed to obtain physical IRQ %d\n", irq); in __startup_pirq()
868 pirq_query_unmask(irq); in __startup_pirq()
870 rc = set_evtchn_to_irq(evtchn, irq); in __startup_pirq()
884 eoi_pirq(irq_get_irq_data(irq)); in __startup_pirq()
889 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); in __startup_pirq()
896 return __startup_pirq(data->irq); in startup_pirq()
901 unsigned int irq = data->irq; in shutdown_pirq() local
902 struct irq_info *info = info_for_irq(irq); in shutdown_pirq()
903 evtchn_port_t evtchn = evtchn_from_irq(irq); in shutdown_pirq()
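__startup_pirq(), shutdown_pirq() and eoi_pirq() above are not called directly by drivers; they are hooked into an irq_chip so the generic IRQ core invokes them at the right points of an interrupt's life cycle. A minimal sketch of such a chip (callback bodies and names are placeholders, not the real xen_pirq_chip):

        #include <linux/irq.h>

        static unsigned int my_chip_startup(struct irq_data *data)
        {
                /* e.g. bind the pirq to an event channel and unmask it */
                return 0;
        }

        static void my_chip_shutdown(struct irq_data *data)
        {
                /* e.g. mask the event channel and close it */
        }

        static void my_chip_eoi(struct irq_data *data)
        {
                /* e.g. clear the event and issue a PHYSDEVOP_eoi if needed */
        }

        static struct irq_chip my_pirq_chip_sketch = {
                .name         = "my-pirq",
                .irq_startup  = my_chip_startup,
                .irq_shutdown = my_chip_shutdown,
                .irq_eoi      = my_chip_eoi,
        };

        /* Attached per irq as in the listing, e.g.:
         *   irq_set_chip_and_handler_name(irq, &my_pirq_chip_sketch,
         *                                 handle_fasteoi_irq, "fasteoi");
         */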
934 return info->irq; in xen_irq_from_gsi()
941 static void __unbind_from_irq(struct irq_info *info, unsigned int irq) in __unbind_from_irq() argument
947 xen_irq_free_desc(irq); in __unbind_from_irq()
994 * IRQ number returned here and the Xen pirq argument.
996 * Note: We don't assign an event channel until the irq has actually started
997 * up. Return an existing irq if we've already got one for the gsi.
1013 pr_info("%s: returning irq %d for gsi %u\n", in xen_bind_pirq_gsi_to_irq()
1022 irq_op.irq = info->irq; in xen_bind_pirq_gsi_to_irq()
1038 __unbind_from_irq(info, info->irq); in xen_bind_pirq_gsi_to_irq()
1042 pirq_query_unmask(info->irq); in xen_bind_pirq_gsi_to_irq()
1059 irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip, in xen_bind_pirq_gsi_to_irq()
1062 irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip, in xen_bind_pirq_gsi_to_irq()
1065 ret = info->irq; in xen_bind_pirq_gsi_to_irq()
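xen_bind_pirq_gsi_to_irq() above returns (or creates) the Linux irq for a physical GSI; per the note at lines 996-997, the event channel is only allocated once the irq is started up. A hedged usage sketch, assuming the gsi/pirq pair was already established (for example via a PHYSDEVOP_map_pirq hypercall, as the x86 Xen PCI code does); the exact prototype may differ between kernel versions:

        #include <linux/types.h>
        #include <xen/events.h>

        static int my_register_gsi(unsigned int gsi, unsigned int pirq, bool shareable)
        {
                int irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable ? 1 : 0,
                                                   shareable ? "ioapic-level"
                                                             : "ioapic-edge");
                /* Negative errno on failure, otherwise the Linux irq number. */
                return irq;
        }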
1091 int i, irq, ret; in xen_bind_pirq_msi_to_irq() local
1096 irq = irq_alloc_descs(-1, 0, nvec, -1); in xen_bind_pirq_msi_to_irq()
1097 if (irq < 0) in xen_bind_pirq_msi_to_irq()
1101 info = xen_irq_init(irq + i); in xen_bind_pirq_msi_to_irq()
1107 irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name); in xen_bind_pirq_msi_to_irq()
1115 ret = irq_set_msi_desc(irq, msidesc); in xen_bind_pirq_msi_to_irq()
1120 return irq; in xen_bind_pirq_msi_to_irq()
1124 info = info_for_irq(irq + nvec); in xen_bind_pirq_msi_to_irq()
1125 __unbind_from_irq(info, irq + nvec); in xen_bind_pirq_msi_to_irq()
1132 int xen_destroy_irq(int irq) in xen_destroy_irq() argument
1135 struct irq_info *info = info_for_irq(irq); in xen_destroy_irq()
1157 pr_warn("unmap irq failed %d\n", rc); in xen_destroy_irq()
1171 int irq; in xen_irq_from_pirq() local
1180 irq = info->irq; in xen_irq_from_pirq()
1184 irq = -1; in xen_irq_from_pirq()
1188 return irq; in xen_irq_from_pirq()
1192 int xen_pirq_from_irq(unsigned irq) in xen_pirq_from_irq() argument
1194 return pirq_from_irq(irq); in xen_pirq_from_irq()
1216 irq_set_chip_and_handler_name(info->irq, chip, in bind_evtchn_to_irq_chip()
1221 __unbind_from_irq(info, info->irq); in bind_evtchn_to_irq_chip()
1237 ret = info->irq; in bind_evtchn_to_irq_chip()
1273 irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip, in bind_ipi_to_irq()
1284 __unbind_from_irq(info, info->irq); in bind_ipi_to_irq()
1292 ret = info->irq; in bind_ipi_to_irq()
1382 irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip, in bind_virq_to_irq()
1385 irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip, in bind_virq_to_irq()
1402 __unbind_from_irq(info, info->irq); in bind_virq_to_irq()
1411 ret = info->irq; in bind_virq_to_irq()
1423 static void unbind_from_irq(unsigned int irq) in unbind_from_irq() argument
1428 info = info_for_irq(irq); in unbind_from_irq()
1429 __unbind_from_irq(info, irq); in unbind_from_irq()
1439 int irq, retval; in bind_evtchn_to_irqhandler_chip() local
1441 irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL, in bind_evtchn_to_irqhandler_chip()
1443 if (irq < 0) in bind_evtchn_to_irqhandler_chip()
1444 return irq; in bind_evtchn_to_irqhandler_chip()
1445 retval = request_irq(irq, handler, irqflags, devname, dev_id); in bind_evtchn_to_irqhandler_chip()
1447 unbind_from_irq(irq); in bind_evtchn_to_irqhandler_chip()
1451 return irq; in bind_evtchn_to_irqhandler_chip()
1481 int irq, retval; in bind_interdomain_evtchn_to_irqhandler_chip() local
1483 irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip, in bind_interdomain_evtchn_to_irqhandler_chip()
1485 if (irq < 0) in bind_interdomain_evtchn_to_irqhandler_chip()
1486 return irq; in bind_interdomain_evtchn_to_irqhandler_chip()
1488 retval = request_irq(irq, handler, irqflags, devname, dev_id); in bind_interdomain_evtchn_to_irqhandler_chip()
1490 unbind_from_irq(irq); in bind_interdomain_evtchn_to_irqhandler_chip()
1494 return irq; in bind_interdomain_evtchn_to_irqhandler_chip()
1514 int irq, retval; in bind_virq_to_irqhandler() local
1516 irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU); in bind_virq_to_irqhandler()
1517 if (irq < 0) in bind_virq_to_irqhandler()
1518 return irq; in bind_virq_to_irqhandler()
1519 retval = request_irq(irq, handler, irqflags, devname, dev_id); in bind_virq_to_irqhandler()
1521 unbind_from_irq(irq); in bind_virq_to_irqhandler()
1525 return irq; in bind_virq_to_irqhandler()
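bind_virq_to_irqhandler() above follows the same bind-then-request_irq pattern as the event-channel variant. A hedged example binding a per-CPU virtual IRQ (VIRQ_DEBUG is just a convenient standard VIRQ number here; the handler is illustrative):

        #include <linux/interrupt.h>
        #include <xen/events.h>
        #include <xen/interface/xen.h>

        static irqreturn_t my_virq_handler(int irq, void *dev_id)
        {
                return IRQ_HANDLED;
        }

        static int my_bind_virq(unsigned int cpu)
        {
                /* Returns a negative errno on failure, the Linux irq otherwise;
                 * release it later with unbind_from_irqhandler(). */
                return bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, my_virq_handler,
                                               IRQF_PERCPU, "my-virq", NULL);
        }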
1536 int irq, retval; in bind_ipi_to_irqhandler() local
1538 irq = bind_ipi_to_irq(ipi, cpu); in bind_ipi_to_irqhandler()
1539 if (irq < 0) in bind_ipi_to_irqhandler()
1540 return irq; in bind_ipi_to_irqhandler()
1543 retval = request_irq(irq, handler, irqflags, devname, dev_id); in bind_ipi_to_irqhandler()
1545 unbind_from_irq(irq); in bind_ipi_to_irqhandler()
1549 return irq; in bind_ipi_to_irqhandler()
1552 void unbind_from_irqhandler(unsigned int irq, void *dev_id) in unbind_from_irqhandler() argument
1554 struct irq_info *info = info_for_irq(irq); in unbind_from_irqhandler()
1558 free_irq(irq, dev_id); in unbind_from_irqhandler()
1559 unbind_from_irq(irq); in unbind_from_irqhandler()
1565 * @irq: irq bound to an event channel.
1568 int xen_set_irq_priority(unsigned irq, unsigned priority) in xen_set_irq_priority() argument
1572 set_priority.port = evtchn_from_irq(irq); in xen_set_irq_priority()
1630 unbind_from_irq(info->irq); in evtchn_put()
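xen_set_irq_priority() above sets the priority of the event channel behind an irq (via EVTCHNOP_set_priority); it only takes effect with the FIFO event-channel ABI. A small hedged usage example (XEN_IRQ_PRIORITY_MAX comes from include/xen/events.h):

        #include <linux/printk.h>
        #include <xen/events.h>

        static void my_raise_priority(int irq)
        {
                /* Ask Xen to deliver this channel's events ahead of
                 * default-priority ones; ignore failure on ABIs that do
                 * not support priorities. */
                if (xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX))
                        pr_warn("could not raise event priority for irq %d\n", irq);
        }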
1701 generic_handle_irq(info->irq); in handle_irq_for_port()
1712 * When closing an event channel the associated IRQ must not be freed in xen_evtchn_do_upcall()
1715 * the IRQ is handled via queue_rcu_work() _after_ closing the event in xen_evtchn_do_upcall()
1744 /* Rebind a new event channel to an existing irq. */
1745 void rebind_evtchn_irq(evtchn_port_t evtchn, int irq) in rebind_evtchn_irq() argument
1747 struct irq_info *info = info_for_irq(irq); in rebind_evtchn_irq()
1752 /* Make sure the irq is masked, since the new event channel in rebind_evtchn_irq()
1754 disable_irq(irq); in rebind_evtchn_irq()
1758 /* After resume the irq<->evtchn mappings are all cleared out */ in rebind_evtchn_irq()
1760 /* Expect irq to have been bound before, in rebind_evtchn_irq()
1764 info->irq = irq; in rebind_evtchn_irq()
1772 enable_irq(irq); in rebind_evtchn_irq()
1844 ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu); in set_affinity_irq()
1853 struct irq_info *info = info_for_irq(data->irq); in enable_dynirq()
1862 struct irq_info *info = info_for_irq(data->irq); in disable_dynirq()
1871 struct irq_info *info = info_for_irq(data->irq); in ack_dynirq()
1886 struct irq_info *info = info_for_irq(data->irq); in lateeoi_ack_dynirq()
1902 struct irq_info *info = info_for_irq(data->irq); in lateeoi_mask_ack_dynirq()
1913 struct irq_info *info = info_for_irq(data->irq); in retrigger_dynirq()
1928 int pirq, rc, irq, gsi; in restore_pirqs() local
1938 irq = info->irq; in restore_pirqs()
1952 pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n", in restore_pirqs()
1953 gsi, irq, pirq, rc); in restore_pirqs()
1958 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); in restore_pirqs()
1960 __startup_pirq(irq); in restore_pirqs()
1969 int virq, irq; in restore_cpu_virqs() local
1972 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) in restore_cpu_virqs()
1974 info = info_for_irq(irq); in restore_cpu_virqs()
1998 int ipi, irq; in restore_cpu_ipis() local
2001 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) in restore_cpu_ipis()
2003 info = info_for_irq(irq); in restore_cpu_ipis()
2021 /* Clear an irq's pending state, in preparation for polling on it */
2022 void xen_clear_irq_pending(int irq) in xen_clear_irq_pending() argument
2024 struct irq_info *info = info_for_irq(irq); in xen_clear_irq_pending()
2031 void xen_set_irq_pending(int irq) in xen_set_irq_pending() argument
2033 evtchn_port_t evtchn = evtchn_from_irq(irq); in xen_set_irq_pending()
2039 bool xen_test_irq_pending(int irq) in xen_test_irq_pending() argument
2041 evtchn_port_t evtchn = evtchn_from_irq(irq); in xen_test_irq_pending()
2050 /* Poll waiting for an irq to become pending with timeout. In the usual case,
2051 * the irq will be disabled so it won't deliver an interrupt. */
2052 void xen_poll_irq_timeout(int irq, u64 timeout) in xen_poll_irq_timeout() argument
2054 evtchn_port_t evtchn = evtchn_from_irq(irq); in xen_poll_irq_timeout()
2068 /* Poll waiting for an irq to become pending. In the usual case, the
2069 * irq will be disabled so it won't deliver an interrupt. */
2070 void xen_poll_irq(int irq) in xen_poll_irq() argument
2072 xen_poll_irq_timeout(irq, 0 /* no timeout */); in xen_poll_irq()
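xen_clear_irq_pending()/xen_poll_irq() above support the idiom of binding an irq but keeping it disabled and using it purely as a wakeup channel (the pv spinlock slow path is the classic user). A hedged sketch of that wait loop, with the wake condition reduced to a flag for illustration:

        #include <linux/types.h>
        #include <xen/events.h>

        static void my_wait_for_kick(int irq, volatile bool *kicked)
        {
                while (!*kicked) {
                        /* Clear stale pending state before re-checking the
                         * condition, so a kick in between is not lost. */
                        xen_clear_irq_pending(irq);
                        if (*kicked)
                                break;
                        /* Blocks the vCPU until the event becomes pending;
                         * no handler runs because the irq stays disabled. */
                        xen_poll_irq(irq);
                }
        }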
2075 /* Check whether the IRQ line is shared with other guests. */
2076 int xen_test_irq_shared(int irq) in xen_test_irq_shared() argument
2078 struct irq_info *info = info_for_irq(irq); in xen_test_irq_shared()
2084 irq_status.irq = info->u.pirq.pirq; in xen_test_irq_shared()
2100 /* No IRQ <-> event-channel mappings. */ in xen_irq_resume()