// SPDX-License-Identifier: GPL-2.0
/*
 * ARM Generic Interrupt Controller (GIC) v3 support
 */

#include <linux/sizes.h>

#include "kvm_util.h"
#include "processor.h"
#include "delay.h"

#include "gic_v3.h"
#include "gic_private.h"

struct gicv3_data {
	void *dist_base;
	void *redist_base[GICV3_MAX_CPUS];
	unsigned int nr_cpus;
	unsigned int nr_spis;
};

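/*
 * Many helpers below take a "cpu_or_dist" argument: if DIST_BIT is set the
 * access targets the distributor, otherwise the value is a vCPU index and
 * the access targets that vCPU's redistributor.
 */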
#define sgi_base_from_redist(redist_base)	(redist_base + SZ_64K)
#define DIST_BIT				(1U << 31)

enum gicv3_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	INVALID_RANGE,
};

static struct gicv3_data gicv3_data;

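/*
 * Poll GICD_CTLR.RWP until outstanding distributor register writes have
 * taken effect, asserting if it doesn't clear within ~1s.
 */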
static void gicv3_gicd_wait_for_rwp(void)
{
	unsigned int count = 100000; /* 1s */

	while (readl(gicv3_data.dist_base + GICD_CTLR) & GICD_CTLR_RWP) {
		GUEST_ASSERT(count--);
		udelay(10);
	}
}

static void gicv3_gicr_wait_for_rwp(void *redist_base)
{
	unsigned int count = 100000; /* 1s */

	while (readl(redist_base + GICR_CTLR) & GICR_CTLR_RWP) {
		GUEST_ASSERT(count--);
		udelay(10);
	}
}

static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
{
	if (cpu_or_dist & DIST_BIT)
		gicv3_gicd_wait_for_rwp();
	else
		gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu_or_dist]);
}

static enum gicv3_intid_range get_intid_range(unsigned int intid)
{
	switch (intid) {
	case 0 ... 15:
		return SGI_RANGE;
	case 16 ... 31:
		return PPI_RANGE;
	case 32 ... 1019:
		return SPI_RANGE;
	}

	/* We should not be reaching here */
	GUEST_ASSERT(0);

	return INVALID_RANGE;
}

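/*
 * Acknowledge the highest priority pending Group-1 interrupt via
 * ICC_IAR1_EL1 and return its INTID.
 */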
static uint64_t gicv3_read_iar(void)
{
	uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);

	dsb(sy);
	return irqstat;
}

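/*
 * Signal end of interrupt: this drops the running priority, and also
 * deactivates the interrupt unless EOI mode is split (ICC_CTLR_EL1.EOImode).
 */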
static void gicv3_write_eoir(uint32_t irq)
{
	write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
	isb();
}

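/*
 * Explicitly deactivate an interrupt; only needed when EOI is split
 * (see gicv3_set_eoi_split()).
 */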
static void gicv3_write_dir(uint32_t irq)
{
	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
	isb();
}

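/*
 * Set ICC_PMR_EL1; only interrupts with a priority higher than (i.e.
 * numerically lower than) the mask are signalled to the PE.
 */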
static void gicv3_set_priority_mask(uint64_t mask)
{
	write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}

static void gicv3_set_eoi_split(bool split)
{
	uint32_t val;

	/*
	 * All other fields are read-only, so no need to read CTLR first. In
	 * fact, the kernel does the same.
	 */
	val = split ? (1U << 1) : 0;
	write_sysreg_s(val, SYS_ICC_CTLR_EL1);
	isb();
}

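/*
 * Raw 32-bit accessors for a register at 'offset' in either the distributor
 * frame (DIST_BIT set) or the given vCPU's SGI frame of its redistributor.
 */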
uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
{
	void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
		: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
	return readl(base + offset);
}

void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
{
	void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
		: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
	writel(reg_val, base + offset);
}

uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
{
	return gicv3_reg_readl(cpu_or_dist, offset) & mask;
}

void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
		uint32_t mask, uint32_t reg_val)
{
	uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;

	tmp |= (reg_val & mask);
	gicv3_reg_writel(cpu_or_dist, offset, tmp);
}

/*
 * We use a single offset for the distributor and redistributor maps as they
 * have the same value in both. The only exceptions are registers that only
 * exist in one and not the other, like GICR_WAKER that doesn't exist in the
 * distributor map. Such registers are conveniently marked as reserved in the
 * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
 * marked as "Reserved" in the Distributor map.
 */
static void gicv3_access_reg(uint32_t intid, uint64_t offset,
		uint32_t reg_bits, uint32_t bits_per_field,
		bool write, uint32_t *val)
{
	uint32_t cpu = guest_get_vcpuid();
	enum gicv3_intid_range intid_range = get_intid_range(intid);
	uint32_t fields_per_reg, index, mask, shift;
	uint32_t cpu_or_dist;

	GUEST_ASSERT(bits_per_field <= reg_bits);
	GUEST_ASSERT(!write || *val < (1U << bits_per_field));
	/*
	 * This function does not support 64 bit accesses. Just asserting here
	 * until we implement readq/writeq.
	 */
	GUEST_ASSERT(reg_bits == 32);

	fields_per_reg = reg_bits / bits_per_field;
	index = intid % fields_per_reg;
	shift = index * bits_per_field;
	mask = ((1U << bits_per_field) - 1) << shift;

	/* Set offset to the actual register holding intid's config. */
	offset += (intid / fields_per_reg) * (reg_bits / 8);

	cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;

	if (write)
		gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift);
	*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}

static void gicv3_write_reg(uint32_t intid, uint64_t offset,
		uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
{
	gicv3_access_reg(intid, offset, reg_bits,
			bits_per_field, true, &val);
}

static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
		uint32_t reg_bits, uint32_t bits_per_field)
{
	uint32_t val;

	gicv3_access_reg(intid, offset, reg_bits,
			bits_per_field, false, &val);
	return val;
}

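/*
 * Program the 8-bit priority field for 'intid' in GICD_IPRIORITYR (or the
 * redistributor equivalent for private interrupts). For example, intid 42
 * uses the register at GICD_IPRIORITYR + 40, bits [23:16]:
 * offset += (42 / 4) * 4 and shift = (42 % 4) * 8 in gicv3_access_reg().
 */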
static void gicv3_set_priority(uint32_t intid, uint32_t prio)
{
	gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}

/* Sets the intid to be level-sensitive or edge-triggered. */
static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
{
	uint32_t val;

	/* N/A for private interrupts. */
	GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
	val = is_edge ? 2 : 0;
	gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
}

static void gicv3_irq_enable(uint32_t intid)
{
	bool is_spi = get_intid_range(intid) == SPI_RANGE;
	uint32_t cpu = guest_get_vcpuid();

	gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}

static void gicv3_irq_disable(uint32_t intid)
{
	bool is_spi = get_intid_range(intid) == SPI_RANGE;
	uint32_t cpu = guest_get_vcpuid();

	gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}

static void gicv3_irq_set_active(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}

static void gicv3_irq_clear_active(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}

static bool gicv3_irq_get_active(uint32_t intid)
{
	return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}

static void gicv3_irq_set_pending(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
}

static void gicv3_irq_clear_pending(uint32_t intid)
{
	gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
}

static bool gicv3_irq_get_pending(uint32_t intid)
{
	return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
}

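/*
 * Wake up the redistributor by clearing GICR_WAKER.ProcessorSleep and
 * waiting for ChildrenAsleep to clear.
 */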
static void gicv3_enable_redist(void *redist_base)
{
	uint32_t val = readl(redist_base + GICR_WAKER);
	unsigned int count = 100000; /* 1s */

	val &= ~GICR_WAKER_ProcessorSleep;
	writel(val, redist_base + GICR_WAKER);

	/* Wait until the processor is 'active' */
	while (readl(redist_base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
		GUEST_ASSERT(count--);
		udelay(10);
	}
}

static inline void *gicr_base_cpu(void *redist_base, uint32_t cpu)
{
	/* Align all the redistributors sequentially */
	return redist_base + cpu * SZ_64K * 2;
}

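/*
 * Per-vCPU initialization: wake the redistributor, configure SGIs/PPIs as
 * Group-1 (disabled, inactive, default priority), then enable the system
 * register CPU interface and Group-1 interrupts for this PE.
 */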
static void gicv3_cpu_init(unsigned int cpu, void *redist_base)
{
	void *sgi_base;
	unsigned int i;
	void *redist_base_cpu;

	GUEST_ASSERT(cpu < gicv3_data.nr_cpus);

	redist_base_cpu = gicr_base_cpu(redist_base, cpu);
	sgi_base = sgi_base_from_redist(redist_base_cpu);

	gicv3_enable_redist(redist_base_cpu);

	/*
	 * Mark all the SGI and PPI interrupts as non-secure Group-1.
	 * Also, deactivate and disable them.
	 */
	writel(~0, sgi_base + GICR_IGROUPR0);
	writel(~0, sgi_base + GICR_ICACTIVER0);
	writel(~0, sgi_base + GICR_ICENABLER0);

	/* Set a default priority for all the SGIs and PPIs */
	for (i = 0; i < 32; i += 4)
		writel(GICD_INT_DEF_PRI_X4,
				sgi_base + GICR_IPRIORITYR0 + i);

	gicv3_gicr_wait_for_rwp(redist_base_cpu);

	/* Enable the GIC system register (ICC_*) access */
	write_sysreg_s(read_sysreg_s(SYS_ICC_SRE_EL1) | ICC_SRE_EL1_SRE,
			SYS_ICC_SRE_EL1);

	/* Set a default priority threshold */
	write_sysreg_s(ICC_PMR_DEF_PRIO, SYS_ICC_PMR_EL1);

	/* Enable non-secure Group-1 interrupts */
	write_sysreg_s(ICC_IGRPEN1_EL1_ENABLE, SYS_ICC_GRPEN1_EL1);

	gicv3_data.redist_base[cpu] = redist_base_cpu;
}

static void gicv3_dist_init(void)
{
	void *dist_base = gicv3_data.dist_base;
	unsigned int i;

	/* Disable the distributor until we set things up */
	writel(0, dist_base + GICD_CTLR);
	gicv3_gicd_wait_for_rwp();

	/*
	 * Mark all the SPI interrupts as non-secure Group-1.
	 * Also, deactivate and disable them.
	 */
	for (i = 32; i < gicv3_data.nr_spis; i += 32) {
		writel(~0, dist_base + GICD_IGROUPR + i / 8);
		writel(~0, dist_base + GICD_ICACTIVER + i / 8);
		writel(~0, dist_base + GICD_ICENABLER + i / 8);
	}

	/* Set a default priority for all the SPIs */
	for (i = 32; i < gicv3_data.nr_spis; i += 4)
		writel(GICD_INT_DEF_PRI_X4,
				dist_base + GICD_IPRIORITYR + i);

	/* Wait for the settings to sync-in */
	gicv3_gicd_wait_for_rwp();

	/* Finally, enable the distributor globally with ARE */
	writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A |
			GICD_CTLR_ENABLE_G1, dist_base + GICD_CTLR);
	gicv3_gicd_wait_for_rwp();
}

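/*
 * Global initialization: record the distributor base, derive the number of
 * supported SPIs from GICD_TYPER (capped at 1020), and set up the
 * distributor. Redistributors are initialized per-vCPU via gicv3_cpu_init().
 */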
static void gicv3_init(unsigned int nr_cpus, void *dist_base)
{
	GUEST_ASSERT(nr_cpus <= GICV3_MAX_CPUS);

	gicv3_data.nr_cpus = nr_cpus;
	gicv3_data.dist_base = dist_base;
	gicv3_data.nr_spis = GICD_TYPER_SPIS(
				readl(gicv3_data.dist_base + GICD_TYPER));
	if (gicv3_data.nr_spis > 1020)
		gicv3_data.nr_spis = 1020;

	/*
	 * Initialize only the distributor for now.
	 * The redistributor and CPU interfaces are initialized
	 * later for every PE.
	 */
	gicv3_dist_init();
}

const struct gic_common_ops gicv3_ops = {
	.gic_init = gicv3_init,
	.gic_cpu_init = gicv3_cpu_init,
	.gic_irq_enable = gicv3_irq_enable,
	.gic_irq_disable = gicv3_irq_disable,
	.gic_read_iar = gicv3_read_iar,
	.gic_write_eoir = gicv3_write_eoir,
	.gic_write_dir = gicv3_write_dir,
	.gic_set_priority_mask = gicv3_set_priority_mask,
	.gic_set_eoi_split = gicv3_set_eoi_split,
	.gic_set_priority = gicv3_set_priority,
	.gic_irq_set_active = gicv3_irq_set_active,
	.gic_irq_clear_active = gicv3_irq_clear_active,
	.gic_irq_get_active = gicv3_irq_get_active,
	.gic_irq_set_pending = gicv3_irq_set_pending,
	.gic_irq_clear_pending = gicv3_irq_clear_pending,
	.gic_irq_get_pending = gicv3_irq_get_pending,
	.gic_irq_set_config = gicv3_irq_set_config,
};