xref: /openbmc/linux/include/kvm/arm_vgic.h (revision df3305156f989339529b3d6744b898d498fb1f7b)
1 /*
2  * Copyright (C) 2012 ARM Ltd.
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  */
18 
19 #ifndef __ASM_ARM_KVM_VGIC_H
20 #define __ASM_ARM_KVM_VGIC_H
21 
22 #include <linux/kernel.h>
23 #include <linux/kvm.h>
24 #include <linux/irqreturn.h>
25 #include <linux/spinlock.h>
26 #include <linux/types.h>
27 
28 #define VGIC_NR_IRQS_LEGACY	256
29 #define VGIC_NR_SGIS		16
30 #define VGIC_NR_PPIS		16
31 #define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)
32 
33 #define VGIC_V2_MAX_LRS		(1 << 6)
34 #define VGIC_V3_MAX_LRS		16
35 #define VGIC_MAX_IRQS		1024
36 #define VGIC_V2_MAX_CPUS	8
37 
38 /* Sanity checks... */
39 #if (KVM_MAX_VCPUS > 255)
40 #error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
41 #endif
42 
43 #if (VGIC_NR_IRQS_LEGACY & 31)
44 #error "VGIC_NR_IRQS must be a multiple of 32"
45 #endif
46 
47 #if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
48 #error "VGIC_NR_IRQS must be <= 1024"
49 #endif
50 
51 /*
52  * The GIC distributor registers describing interrupts have two parts:
53  * - 32 per-CPU interrupts (SGI + PPI)
54  * - a bunch of shared interrupts (SPI)
55  */
56 struct vgic_bitmap {
57 	/*
58 	 * - One UL per VCPU for private interrupts (assumes UL is at
59 	 *   least 32 bits)
60 	 * - As many UL as necessary for shared interrupts.
61 	 *
62 	 * The private interrupts are accessed via the "private"
63 	 * field, one UL per vcpu (the state for vcpu n is in
64 	 * private[n]). The shared interrupts are accessed via the
65 	 * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
66 	 */
67 	unsigned long *private;
68 	unsigned long *shared;
69 };
70 
71 struct vgic_bytemap {
72 	/*
73 	 * - 8 u32 per VCPU for private interrupts
74 	 * - As many u32 as necessary for shared interrupts.
75 	 *
76 	 * The private interrupts are accessed via the "private"
77 	 * field, (the state for vcpu n is in private[n*8] to
78 	 * private[n*8 + 7]). The shared interrupts are accessed via
79 	 * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
80 	 * shared[(n-32)/4] word).
81 	 */
82 	u32 *private;
83 	u32 *shared;
84 };
85 
86 struct kvm_vcpu;
87 
88 enum vgic_type {
89 	VGIC_V2,		/* Good ol' GICv2 */
90 	VGIC_V3,		/* New fancy GICv3 */
91 };
92 
93 #define LR_STATE_PENDING	(1 << 0)
94 #define LR_STATE_ACTIVE		(1 << 1)
95 #define LR_STATE_MASK		(3 << 0)
96 #define LR_EOI_INT		(1 << 2)
97 
98 struct vgic_lr {
99 	u16	irq;
100 	u8	source;
101 	u8	state;
102 };
103 
104 struct vgic_vmcr {
105 	u32	ctlr;
106 	u32	abpr;
107 	u32	bpr;
108 	u32	pmr;
109 };
110 
111 struct vgic_ops {
112 	struct vgic_lr	(*get_lr)(const struct kvm_vcpu *, int);
113 	void	(*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
114 	void	(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
115 	u64	(*get_elrsr)(const struct kvm_vcpu *vcpu);
116 	u64	(*get_eisr)(const struct kvm_vcpu *vcpu);
117 	u32	(*get_interrupt_status)(const struct kvm_vcpu *vcpu);
118 	void	(*enable_underflow)(struct kvm_vcpu *vcpu);
119 	void	(*disable_underflow)(struct kvm_vcpu *vcpu);
120 	void	(*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
121 	void	(*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
122 	void	(*enable)(struct kvm_vcpu *vcpu);
123 };
124 
125 struct vgic_params {
126 	/* vgic type */
127 	enum vgic_type	type;
128 	/* Physical address of vgic virtual cpu interface */
129 	phys_addr_t	vcpu_base;
130 	/* Number of list registers */
131 	u32		nr_lr;
132 	/* Interrupt number */
133 	unsigned int	maint_irq;
134 	/* Virtual control interface base address */
135 	void __iomem	*vctrl_base;
136 	int		max_gic_vcpus;
137 	/* Only needed for the legacy KVM_CREATE_IRQCHIP */
138 	bool		can_emulate_gicv2;
139 };
140 
141 struct vgic_vm_ops {
142 	bool	(*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
143 			       struct kvm_exit_mmio *);
144 	bool	(*queue_sgi)(struct kvm_vcpu *, int irq);
145 	void	(*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
146 	int	(*init_model)(struct kvm *);
147 	int	(*map_resources)(struct kvm *, const struct vgic_params *);
148 };
149 
150 struct vgic_dist {
151 #ifdef CONFIG_KVM_ARM_VGIC
152 	spinlock_t		lock;
153 	bool			in_kernel;
154 	bool			ready;
155 
156 	/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
157 	u32			vgic_model;
158 
159 	int			nr_cpus;
160 	int			nr_irqs;
161 
162 	/* Virtual control interface mapping */
163 	void __iomem		*vctrl_base;
164 
165 	/* Distributor and vcpu interface mapping in the guest */
166 	phys_addr_t		vgic_dist_base;
167 	/* GICv2 and GICv3 use different mapped register blocks */
168 	union {
169 		phys_addr_t		vgic_cpu_base;
170 		phys_addr_t		vgic_redist_base;
171 	};
172 
173 	/* Distributor enabled */
174 	u32			enabled;
175 
176 	/* Interrupt enabled (one bit per IRQ) */
177 	struct vgic_bitmap	irq_enabled;
178 
179 	/* Level-triggered interrupt external input is asserted */
180 	struct vgic_bitmap	irq_level;
181 
182 	/*
183 	 * Interrupt state is pending on the distributor
184 	 */
185 	struct vgic_bitmap	irq_pending;
186 
187 	/*
188 	 * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
189 	 * interrupts.  Essentially holds the state of the flip-flop in
190 	 * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
191 	 * Once set, it is only cleared for level-triggered interrupts on
192 	 * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
193 	 */
194 	struct vgic_bitmap	irq_soft_pend;
195 
196 	/* Level-triggered interrupt queued on VCPU interface */
197 	struct vgic_bitmap	irq_queued;
198 
199 	/* Interrupt priority. Not used yet. */
200 	struct vgic_bytemap	irq_priority;
201 
202 	/* Level/edge triggered */
203 	struct vgic_bitmap	irq_cfg;
204 
205 	/*
206 	 * Source CPU per SGI and target CPU:
207 	 *
208 	 * Each byte represent a SGI observable on a VCPU, each bit of
209 	 * this byte indicating if the corresponding VCPU has
210 	 * generated this interrupt. This is a GICv2 feature only.
211 	 *
212 	 * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
213 	 * the SGIs observable on VCPUn.
214 	 */
215 	u8			*irq_sgi_sources;
216 
217 	/*
218 	 * Target CPU for each SPI:
219 	 *
220 	 * Array of available SPI, each byte indicating the target
221 	 * VCPU for SPI. IRQn (n >=32) is at irq_spi_cpu[n-32].
222 	 */
223 	u8			*irq_spi_cpu;
224 
225 	/*
226 	 * Reverse lookup of irq_spi_cpu for faster compute pending:
227 	 *
228 	 * Array of bitmaps, one per VCPU, describing if IRQn is
229 	 * routed to a particular VCPU.
230 	 */
231 	struct vgic_bitmap	*irq_spi_target;
232 
233 	/* Target MPIDR for each IRQ (needed for GICv3 IROUTERn) only */
234 	u32			*irq_spi_mpidr;
235 
236 	/* Bitmap indicating which CPU has something pending */
237 	unsigned long		*irq_pending_on_cpu;
238 
239 	struct vgic_vm_ops	vm_ops;
240 #endif
241 };
242 
243 struct vgic_v2_cpu_if {
244 	u32		vgic_hcr;
245 	u32		vgic_vmcr;
246 	u32		vgic_misr;	/* Saved only */
247 	u64		vgic_eisr;	/* Saved only */
248 	u64		vgic_elrsr;	/* Saved only */
249 	u32		vgic_apr;
250 	u32		vgic_lr[VGIC_V2_MAX_LRS];
251 };
252 
253 struct vgic_v3_cpu_if {
254 #ifdef CONFIG_ARM_GIC_V3
255 	u32		vgic_hcr;
256 	u32		vgic_vmcr;
257 	u32		vgic_sre;	/* Restored only, change ignored */
258 	u32		vgic_misr;	/* Saved only */
259 	u32		vgic_eisr;	/* Saved only */
260 	u32		vgic_elrsr;	/* Saved only */
261 	u32		vgic_ap0r[4];
262 	u32		vgic_ap1r[4];
263 	u64		vgic_lr[VGIC_V3_MAX_LRS];
264 #endif
265 };
266 
267 struct vgic_cpu {
268 #ifdef CONFIG_KVM_ARM_VGIC
269 	/* per IRQ to LR mapping */
270 	u8		*vgic_irq_lr_map;
271 
272 	/* Pending interrupts on this VCPU */
273 	DECLARE_BITMAP(	pending_percpu, VGIC_NR_PRIVATE_IRQS);
274 	unsigned long	*pending_shared;
275 
276 	/* Bitmap of used/free list registers */
277 	DECLARE_BITMAP(	lr_used, VGIC_V2_MAX_LRS);
278 
279 	/* Number of list registers on this CPU */
280 	int		nr_lr;
281 
282 	/* CPU vif control registers for world switch */
283 	union {
284 		struct vgic_v2_cpu_if	vgic_v2;
285 		struct vgic_v3_cpu_if	vgic_v3;
286 	};
287 #endif
288 };
289 
290 #define LR_EMPTY	0xff
291 
292 #define INT_STATUS_EOI		(1 << 0)
293 #define INT_STATUS_UNDERFLOW	(1 << 1)
294 
295 struct kvm;
296 struct kvm_vcpu;
297 struct kvm_run;
298 struct kvm_exit_mmio;
299 
300 #ifdef CONFIG_KVM_ARM_VGIC
301 int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
302 int kvm_vgic_hyp_init(void);
303 int kvm_vgic_map_resources(struct kvm *kvm);
304 int kvm_vgic_get_max_vcpus(void);
305 int kvm_vgic_create(struct kvm *kvm, u32 type);
306 void kvm_vgic_destroy(struct kvm *kvm);
307 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
308 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
309 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
310 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
311 			bool level);
312 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
313 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
314 bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
315 		      struct kvm_exit_mmio *mmio);
316 
317 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
318 #define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
319 #define vgic_ready(k)		((k)->arch.vgic.ready)
320 
321 int vgic_v2_probe(struct device_node *vgic_node,
322 		  const struct vgic_ops **ops,
323 		  const struct vgic_params **params);
324 #ifdef CONFIG_ARM_GIC_V3
325 int vgic_v3_probe(struct device_node *vgic_node,
326 		  const struct vgic_ops **ops,
327 		  const struct vgic_params **params);
328 #else
329 static inline int vgic_v3_probe(struct device_node *vgic_node,
330 				const struct vgic_ops **ops,
331 				const struct vgic_params **params)
332 {
333 	return -ENODEV;
334 }
335 #endif
336 
337 #else
338 static inline int kvm_vgic_hyp_init(void)
339 {
340 	return 0;
341 }
342 
343 static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
344 {
345 	return 0;
346 }
347 
348 static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
349 {
350 	return -ENXIO;
351 }
352 
353 static inline int kvm_vgic_map_resources(struct kvm *kvm)
354 {
355 	return 0;
356 }
357 
358 static inline int kvm_vgic_create(struct kvm *kvm, u32 type)
359 {
360 	return 0;
361 }
362 
363 static inline void kvm_vgic_destroy(struct kvm *kvm)
364 {
365 }
366 
367 static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
368 {
369 }
370 
371 static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
372 {
373 	return 0;
374 }
375 
376 static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
377 static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}
378 
379 static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
380 				      unsigned int irq_num, bool level)
381 {
382 	return 0;
383 }
384 
385 static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
386 {
387 	return 0;
388 }
389 
390 static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
391 				    struct kvm_exit_mmio *mmio)
392 {
393 	return false;
394 }
395 
396 static inline int irqchip_in_kernel(struct kvm *kvm)
397 {
398 	return 0;
399 }
400 
401 static inline bool vgic_initialized(struct kvm *kvm)
402 {
403 	return true;
404 }
405 
406 static inline bool vgic_ready(struct kvm *kvm)
407 {
408 	return true;
409 }
410 
411 static inline int kvm_vgic_get_max_vcpus(void)
412 {
413 	return KVM_MAX_VCPUS;
414 }
415 #endif
416 
417 #endif
418