/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Xen Event Channels (internal header)
 *
 * Copyright (C) 2013 Citrix Systems R&D Ltd.
 */
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	int refcnt;
	enum xen_irq_type type;	/* type */
	unsigned irq;
	evtchn_port_t evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};

#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
#define PIRQ_MSI_GROUP	(1 << 2)

struct evtchn_ops {
	unsigned (*max_channels)(void);
	unsigned (*nr_channels)(void);

	int (*setup)(struct irq_info *info);
	void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);

	void (*clear_pending)(evtchn_port_t port);
	void (*set_pending)(evtchn_port_t port);
	bool (*is_pending)(evtchn_port_t port);
	bool (*test_and_set_mask)(evtchn_port_t port);
	void (*mask)(evtchn_port_t port);
	void (*unmask)(evtchn_port_t port);

	void (*handle_events)(unsigned cpu);
	void (*resume)(void);
};

extern const struct evtchn_ops *evtchn_ops;

extern int **evtchn_to_irq;
int get_evtchn_to_irq(evtchn_port_t evtchn);

struct irq_info *info_for_irq(unsigned irq);
unsigned cpu_from_irq(unsigned irq);
unsigned int cpu_from_evtchn(evtchn_port_t evtchn);
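
/*
 * Illustrative sketch (not part of this header): each event channel ABI
 * backend is expected to provide its own struct evtchn_ops instance and
 * install it from its init function, so that the inline wrappers below
 * dispatch through the currently selected ABI.  The names used here
 * (evtchn_ops_2l, evtchn_2l_*) are assumptions modelled on the 2-level
 * backend, not definitions made by this header:
 *
 *	static const struct evtchn_ops evtchn_ops_2l = {
 *		.max_channels	= evtchn_2l_max_channels,
 *		.nr_channels	= evtchn_2l_max_channels,
 *		.clear_pending	= evtchn_2l_clear_pending,
 *		.set_pending	= evtchn_2l_set_pending,
 *		...
 *	};
 *
 *	void __init xen_evtchn_2l_init(void)
 *	{
 *		evtchn_ops = &evtchn_ops_2l;
 *	}
 */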

static inline unsigned xen_evtchn_max_channels(void)
{
	return evtchn_ops->max_channels();
}

/*
 * Do any ABI specific setup for a bound event channel before it can
 * be unmasked and used.
 */
static inline int xen_evtchn_port_setup(struct irq_info *info)
{
	if (evtchn_ops->setup)
		return evtchn_ops->setup(info);
	return 0;
}

static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
					       unsigned cpu)
{
	evtchn_ops->bind_to_cpu(info, cpu);
}

static inline void clear_evtchn(evtchn_port_t port)
{
	evtchn_ops->clear_pending(port);
}

static inline void set_evtchn(evtchn_port_t port)
{
	evtchn_ops->set_pending(port);
}

static inline bool test_evtchn(evtchn_port_t port)
{
	return evtchn_ops->is_pending(port);
}

static inline bool test_and_set_mask(evtchn_port_t port)
{
	return evtchn_ops->test_and_set_mask(port);
}

static inline void mask_evtchn(evtchn_port_t port)
{
	evtchn_ops->mask(port);
}

static inline void unmask_evtchn(evtchn_port_t port)
{
	evtchn_ops->unmask(port);
}

static inline void xen_evtchn_handle_events(unsigned cpu)
{
	evtchn_ops->handle_events(cpu);
}

static inline void xen_evtchn_resume(void)
{
	if (evtchn_ops->resume)
		evtchn_ops->resume();
}

void xen_evtchn_2l_init(void);
int xen_evtchn_fifo_init(void);

#endif /* #ifndef __EVENTS_INTERNAL_H__ */