// SPDX-License-Identifier: GPL-2.0
/*
 * ARM Generic Interrupt Controller (GIC) v3 support
 */

#include <linux/sizes.h>

#include "kvm_util.h"
#include "processor.h"
#include "delay.h"

#include "gic_v3.h"
#include "gic_private.h"

struct gicv3_data {
	void *dist_base;
	void *redist_base[GICV3_MAX_CPUS];
	unsigned int nr_cpus;
	unsigned int nr_spis;
};

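/*
 * Each redistributor is made of two consecutive 64K frames: the RD_base
 * control frame followed by the SGI_base frame holding the SGI/PPI
 * configuration registers. The register accessors below take a
 * "cpu_or_dist" argument: DIST_BIT selects the distributor, otherwise the
 * value is a vCPU id selecting that vCPU's redistributor.
 */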
#define sgi_base_from_redist(redist_base)	(redist_base + SZ_64K)
#define DIST_BIT				(1U << 31)

enum gicv3_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	INVALID_RANGE,
};

static struct gicv3_data gicv3_data;

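/*
 * Wait for the distributor to finish processing outstanding register
 * writes; GICD_CTLR.RWP (Register Write Pending) is set while the effects
 * of certain writes are still propagating.
 */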
static void gicv3_gicd_wait_for_rwp(void)
{
	unsigned int count = 100000; /* 1s */

	while (readl(gicv3_data.dist_base + GICD_CTLR) & GICD_CTLR_RWP) {
		GUEST_ASSERT(count--);
		udelay(10);
	}
}

static void gicv3_gicr_wait_for_rwp(void *redist_base)
{
	unsigned int count = 100000; /* 1s */

	while (readl(redist_base + GICR_CTLR) & GICR_CTLR_RWP) {
		GUEST_ASSERT(count--);
		udelay(10);
	}
}

static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
{
	if (cpu_or_dist & DIST_BIT)
		gicv3_gicd_wait_for_rwp();
	else
		gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu_or_dist]);
}

static enum gicv3_intid_range get_intid_range(unsigned int intid)
{
	switch (intid) {
	case 0 ... 15:
		return SGI_RANGE;
	case 16 ... 31:
		return PPI_RANGE;
	case 32 ... 1019:
		return SPI_RANGE;
	}

	/* We should not be reaching here */
	GUEST_ASSERT(0);

	return INVALID_RANGE;
}

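/*
 * Reading ICC_IAR1_EL1 acknowledges the highest priority pending Group-1
 * interrupt and returns its INTID.
 */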
static uint64_t gicv3_read_iar(void)
{
	uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);

	dsb(sy);
	return irqstat;
}

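/*
 * Writing the INTID to ICC_EOIR1_EL1 performs the priority drop; it also
 * deactivates the interrupt unless EOI split mode (EOImode == 1) is
 * enabled.
 */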
static void gicv3_write_eoir(uint32_t irq)
{
	write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
	isb();
}

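/*
 * ICC_DIR_EL1 deactivates an interrupt; only relevant when EOI split mode
 * is enabled, in which case EOIR merely drops the priority.
 */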
static void gicv3_write_dir(uint32_t irq)
{
	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
	isb();
}

static void gicv3_set_priority_mask(uint64_t mask)
{
	write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}

static void gicv3_set_eoi_split(bool split)
{
	uint32_t val;

	/*
	 * All other fields are read-only, so no need to read CTLR first. In
	 * fact, the kernel does the same.
	 */
	val = split ? (1U << 1) : 0;
	write_sysreg_s(val, SYS_ICC_CTLR_EL1);
	isb();
}

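/*
 * 32-bit MMIO accessors: cpu_or_dist selects either the distributor
 * (DIST_BIT set) or the SGI_base frame of the given vCPU's redistributor.
 */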
uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
{
	void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
		: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
	return readl(base + offset);
}

void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
{
	void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
		: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
	writel(reg_val, base + offset);
}

uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
{
	return gicv3_reg_readl(cpu_or_dist, offset) & mask;
}

void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
		uint32_t mask, uint32_t reg_val)
{
	uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;

	tmp |= (reg_val & mask);
	gicv3_reg_writel(cpu_or_dist, offset, tmp);
}

/*
 * We use a single offset for the distributor and redistributor maps as they
 * have the same value in both. The only exceptions are registers that only
 * exist in one and not the other, like GICR_WAKER that doesn't exist in the
 * distributor map. Such registers are conveniently marked as reserved in the
 * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
 * marked as "Reserved" in the Distributor map.
 */
static void gicv3_access_reg(uint32_t intid, uint64_t offset,
		uint32_t reg_bits, uint32_t bits_per_field,
		bool write, uint32_t *val)
{
	uint32_t cpu = guest_get_vcpuid();
	enum gicv3_intid_range intid_range = get_intid_range(intid);
	uint32_t fields_per_reg, index, mask, shift;
	uint32_t cpu_or_dist;

	GUEST_ASSERT(bits_per_field <= reg_bits);
	GUEST_ASSERT(*val < (1U << bits_per_field));
	/*
	 * Some registers, like IROUTER, are 64 bits wide. Those are currently
	 * not supported by readl or writel, so just assert here until they
	 * are.
	 */
	GUEST_ASSERT(reg_bits == 32);

	fields_per_reg = reg_bits / bits_per_field;
	index = intid % fields_per_reg;
	shift = index * bits_per_field;
	mask = ((1U << bits_per_field) - 1) << shift;

	/* Set offset to the actual register holding intid's config. */
	offset += (intid / fields_per_reg) * (reg_bits / 8);

	cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;

	if (write)
		gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift);
	*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}

static void gicv3_write_reg(uint32_t intid, uint64_t offset,
		uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
{
	gicv3_access_reg(intid, offset, reg_bits,
			bits_per_field, true, &val);
}

static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
		uint32_t reg_bits, uint32_t bits_per_field)
{
	uint32_t val;

	gicv3_access_reg(intid, offset, reg_bits,
			bits_per_field, false, &val);
	return val;
}

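/*
 * Set the 8-bit priority of an interrupt; IPRIORITYR packs four priority
 * fields into each 32-bit register.
 */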
static void gicv3_set_priority(uint32_t intid, uint32_t prio)
{
	gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}

/* Sets the intid to be level-sensitive or edge-triggered. */
static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
{
	uint32_t val;

	/* N/A for private interrupts. */
	GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
	val = is_edge ? 2 : 0;
	gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
}

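/*
 * Enable/disable an interrupt by writing a 1 to its ISENABLER/ICENABLER
 * bit, in the distributor for SPIs or in this vCPU's redistributor for
 * SGIs/PPIs, then wait for any pending register writes to complete.
 */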
static void gicv3_irq_enable(uint32_t intid)
{
	bool is_spi = get_intid_range(intid) == SPI_RANGE;
	uint32_t cpu = guest_get_vcpuid();

	gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}

static void gicv3_irq_disable(uint32_t intid)
{
	bool is_spi = get_intid_range(intid) == SPI_RANGE;
	uint32_t cpu = guest_get_vcpuid();

	gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}

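/*
 * Active and pending state helpers: writing a 1 to the interrupt's bit in
 * ISACTIVER/ICACTIVER or ISPENDR/ICPENDR sets or clears the corresponding
 * state; reads return the current state.
 */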
static void gicv3_irq_set_active(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}

static void gicv3_irq_clear_active(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}

static bool gicv3_irq_get_active(uint32_t intid)
{
	return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}

static void gicv3_irq_set_pending(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
}

static void gicv3_irq_clear_pending(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
}

static bool gicv3_irq_get_pending(uint32_t intid)
{
	return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
}

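/*
 * Wake up the redistributor by clearing GICR_WAKER.ProcessorSleep and
 * waiting for ChildrenAsleep to clear, so interrupts can be forwarded to
 * the CPU interface.
 */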
static void gicv3_enable_redist(void *redist_base)
{
	uint32_t val = readl(redist_base + GICR_WAKER);
	unsigned int count = 100000; /* 1s */

	val &= ~GICR_WAKER_ProcessorSleep;
	writel(val, redist_base + GICR_WAKER);

	/* Wait until the processor is 'active' */
	while (readl(redist_base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
		GUEST_ASSERT(count--);
		udelay(10);
	}
}

static inline void *gicr_base_cpu(void *redist_base, uint32_t cpu)
{
	/* Align all the redistributors sequentially */
	return redist_base + cpu * SZ_64K * 2;
}

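/*
 * Per-vCPU initialization: wake up this vCPU's redistributor, configure
 * its SGIs/PPIs, and enable system register access to the CPU interface.
 */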
static void gicv3_cpu_init(unsigned int cpu, void *redist_base)
{
	void *sgi_base;
	unsigned int i;
	void *redist_base_cpu;

	GUEST_ASSERT(cpu < gicv3_data.nr_cpus);

	redist_base_cpu = gicr_base_cpu(redist_base, cpu);
	sgi_base = sgi_base_from_redist(redist_base_cpu);

	gicv3_enable_redist(redist_base_cpu);

	/*
	 * Mark all the SGI and PPI interrupts as non-secure Group-1.
	 * Also, deactivate and disable them.
	 */
	writel(~0, sgi_base + GICR_IGROUPR0);
	writel(~0, sgi_base + GICR_ICACTIVER0);
	writel(~0, sgi_base + GICR_ICENABLER0);

	/* Set a default priority for all the SGIs and PPIs */
	for (i = 0; i < 32; i += 4)
		writel(GICD_INT_DEF_PRI_X4,
				sgi_base + GICR_IPRIORITYR0 + i);

	gicv3_gicr_wait_for_rwp(redist_base_cpu);

	/* Enable the GIC system register (ICC_*) access */
	write_sysreg_s(read_sysreg_s(SYS_ICC_SRE_EL1) | ICC_SRE_EL1_SRE,
			SYS_ICC_SRE_EL1);

	/* Set a default priority threshold */
	write_sysreg_s(ICC_PMR_DEF_PRIO, SYS_ICC_PMR_EL1);

	/* Enable non-secure Group-1 interrupts */
	write_sysreg_s(ICC_IGRPEN1_EL1_ENABLE, SYS_ICC_GRPEN1_EL1);

	gicv3_data.redist_base[cpu] = redist_base_cpu;
}

static void gicv3_dist_init(void)
{
	void *dist_base = gicv3_data.dist_base;
	unsigned int i;

	/* Disable the distributor until we set things up */
	writel(0, dist_base + GICD_CTLR);
	gicv3_gicd_wait_for_rwp();

	/*
	 * Mark all the SPI interrupts as non-secure Group-1.
	 * Also, deactivate and disable them.
	 */
	for (i = 32; i < gicv3_data.nr_spis; i += 32) {
		writel(~0, dist_base + GICD_IGROUPR + i / 8);
		writel(~0, dist_base + GICD_ICACTIVER + i / 8);
		writel(~0, dist_base + GICD_ICENABLER + i / 8);
	}

	/* Set a default priority for all the SPIs */
	for (i = 32; i < gicv3_data.nr_spis; i += 4)
		writel(GICD_INT_DEF_PRI_X4,
				dist_base + GICD_IPRIORITYR + i);

	/* Wait for the settings to sync-in */
	gicv3_gicd_wait_for_rwp();

	/* Finally, enable the distributor globally with ARE */
	writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A |
			GICD_CTLR_ENABLE_G1, dist_base + GICD_CTLR);
	gicv3_gicd_wait_for_rwp();
}

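/*
 * One-time initialization: record the distributor base, derive the number
 * of implemented SPIs from GICD_TYPER, and bring up the distributor.
 * Redistributors and CPU interfaces are set up per vCPU in gicv3_cpu_init().
 */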
static void gicv3_init(unsigned int nr_cpus, void *dist_base)
{
	GUEST_ASSERT(nr_cpus <= GICV3_MAX_CPUS);

	gicv3_data.nr_cpus = nr_cpus;
	gicv3_data.dist_base = dist_base;
	gicv3_data.nr_spis = GICD_TYPER_SPIS(
				readl(gicv3_data.dist_base + GICD_TYPER));
	if (gicv3_data.nr_spis > 1020)
		gicv3_data.nr_spis = 1020;

	/*
	 * Initialize only the distributor for now.
	 * The redistributor and CPU interfaces are initialized
	 * later for every PE.
	 */
	gicv3_dist_init();
}

const struct gic_common_ops gicv3_ops = {
	.gic_init = gicv3_init,
	.gic_cpu_init = gicv3_cpu_init,
	.gic_irq_enable = gicv3_irq_enable,
	.gic_irq_disable = gicv3_irq_disable,
	.gic_read_iar = gicv3_read_iar,
	.gic_write_eoir = gicv3_write_eoir,
	.gic_write_dir = gicv3_write_dir,
	.gic_set_priority_mask = gicv3_set_priority_mask,
	.gic_set_eoi_split = gicv3_set_eoi_split,
	.gic_set_priority = gicv3_set_priority,
	.gic_irq_set_active = gicv3_irq_set_active,
	.gic_irq_clear_active = gicv3_irq_clear_active,
	.gic_irq_get_active = gicv3_irq_get_active,
	.gic_irq_set_pending = gicv3_irq_set_pending,
	.gic_irq_clear_pending = gicv3_irq_clear_pending,
	.gic_irq_get_pending = gicv3_irq_get_pending,
	.gic_irq_set_config = gicv3_irq_set_config,
};