xref: /openbmc/linux/drivers/xen/events/events_internal.h (revision e533cda12d8f0e7936354bafdc85c81741f805d2)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Xen Event Channels (internal header)
4  *
5  * Copyright (C) 2013 Citrix Systems R&D Ltd.
6  */
7 #ifndef __EVENTS_INTERNAL_H__
8 #define __EVENTS_INTERNAL_H__
9 
10 /* Interrupt types. */
/* Interrupt types: what a Xen IRQ is currently bound to. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,	/* Not bound to any event channel yet. */
	IRQT_PIRQ,		/* Bound to a physical IRQ (see irq_info.u.pirq). */
	IRQT_VIRQ,		/* Bound to a virtual IRQ (see irq_info.u.virq). */
	IRQT_IPI,		/* Bound to an IPI vector (see irq_info.u.ipi). */
	IRQT_EVTCHN		/* Bound to a plain event channel; no extra data. */
};
18 
19 /*
20  * Packed IRQ information:
21  * type - enum xen_irq_type
22  * event channel - irq->event channel mapping
23  * cpu - cpu this event channel is bound to
24  * index - type-specific information:
25  *    PIRQ - vector, with MSB being "needs EIO", or physical IRQ of the HVM
26  *           guest, or GSI (real passthrough IRQ) of the device.
27  *    VIRQ - virq number
28  *    IPI - IPI vector
29  *    EVTCHN -
30  */
31 struct irq_info {
32 	struct list_head list;
33 	struct list_head eoi_list;
34 	short refcnt;
35 	short spurious_cnt;
36 	enum xen_irq_type type;	/* type */
37 	unsigned irq;
38 	evtchn_port_t evtchn;	/* event channel */
39 	unsigned short cpu;	/* cpu bound */
40 	unsigned short eoi_cpu;	/* EOI must happen on this cpu */
41 	unsigned int irq_epoch;	/* If eoi_cpu valid: irq_epoch of event */
42 	u64 eoi_time;		/* Time in jiffies when to EOI. */
43 
44 	union {
45 		unsigned short virq;
46 		enum ipi_vector ipi;
47 		struct {
48 			unsigned short pirq;
49 			unsigned short gsi;
50 			unsigned char vector;
51 			unsigned char flags;
52 			uint16_t domid;
53 		} pirq;
54 	} u;
55 };
56 
57 #define PIRQ_NEEDS_EOI	(1 << 0)
58 #define PIRQ_SHAREABLE	(1 << 1)
59 #define PIRQ_MSI_GROUP	(1 << 2)
60 
/* Opaque per-event-loop state, defined in events_base.c. */
struct evtchn_loop_ctrl;

/*
 * ABI-specific operations table: one implementation per event channel
 * ABI (2-level and FIFO — see xen_evtchn_2l_init()/xen_evtchn_fifo_init()).
 * Optional hooks (may be NULL, guarded by callers): setup, resume,
 * percpu_init, percpu_deinit.
 */
struct evtchn_ops {
	unsigned (*max_channels)(void);		/* Max channels the ABI supports. */
	unsigned (*nr_channels)(void);		/* Channels currently usable. */

	int (*setup)(struct irq_info *info);	/* Optional: per-channel setup before unmask. */
	void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);	/* Rebind channel to @cpu. */

	void (*clear_pending)(evtchn_port_t port);	/* Clear the pending flag of @port. */
	void (*set_pending)(evtchn_port_t port);	/* Set the pending flag of @port. */
	bool (*is_pending)(evtchn_port_t port);		/* Test the pending flag of @port. */
	bool (*test_and_set_mask)(evtchn_port_t port);	/* Mask @port; return old mask state. */
	void (*mask)(evtchn_port_t port);	/* Mask @port. */
	void (*unmask)(evtchn_port_t port);	/* Unmask @port. */

	/* Process pending events for @cpu within one event loop pass. */
	void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl);
	void (*resume)(void);			/* Optional: re-init after suspend/resume. */

	int (*percpu_init)(unsigned int cpu);	/* Optional: per-cpu bringup. */
	int (*percpu_deinit)(unsigned int cpu);	/* Optional: per-cpu teardown. */
};
83 
84 extern const struct evtchn_ops *evtchn_ops;
85 
86 extern int **evtchn_to_irq;
87 int get_evtchn_to_irq(evtchn_port_t evtchn);
88 void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
89 
90 struct irq_info *info_for_irq(unsigned irq);
91 unsigned cpu_from_irq(unsigned irq);
92 unsigned int cpu_from_evtchn(evtchn_port_t evtchn);
93 
94 static inline unsigned xen_evtchn_max_channels(void)
95 {
96 	return evtchn_ops->max_channels();
97 }
98 
99 /*
100  * Do any ABI specific setup for a bound event channel before it can
101  * be unmasked and used.
102  */
103 static inline int xen_evtchn_port_setup(struct irq_info *info)
104 {
105 	if (evtchn_ops->setup)
106 		return evtchn_ops->setup(info);
107 	return 0;
108 }
109 
110 static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
111 					       unsigned cpu)
112 {
113 	evtchn_ops->bind_to_cpu(info, cpu);
114 }
115 
116 static inline void clear_evtchn(evtchn_port_t port)
117 {
118 	evtchn_ops->clear_pending(port);
119 }
120 
121 static inline void set_evtchn(evtchn_port_t port)
122 {
123 	evtchn_ops->set_pending(port);
124 }
125 
126 static inline bool test_evtchn(evtchn_port_t port)
127 {
128 	return evtchn_ops->is_pending(port);
129 }
130 
131 static inline bool test_and_set_mask(evtchn_port_t port)
132 {
133 	return evtchn_ops->test_and_set_mask(port);
134 }
135 
136 static inline void mask_evtchn(evtchn_port_t port)
137 {
138 	return evtchn_ops->mask(port);
139 }
140 
141 static inline void unmask_evtchn(evtchn_port_t port)
142 {
143 	return evtchn_ops->unmask(port);
144 }
145 
146 static inline void xen_evtchn_handle_events(unsigned cpu,
147 					    struct evtchn_loop_ctrl *ctrl)
148 {
149 	return evtchn_ops->handle_events(cpu, ctrl);
150 }
151 
152 static inline void xen_evtchn_resume(void)
153 {
154 	if (evtchn_ops->resume)
155 		evtchn_ops->resume();
156 }
157 
158 void xen_evtchn_2l_init(void);
159 int xen_evtchn_fifo_init(void);
160 
161 #endif /* #ifndef __EVENTS_INTERNAL_H__ */
162