// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright The Asahi Linux Contributors
 *
 * Based on irq-lpc32xx:
 *   Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
 * Based on irq-bcm2836:
 *   Copyright 2015 Broadcom
 */

/*
 * AIC is a fairly simple interrupt controller with the following features:
 *
 * - 896 level-triggered hardware IRQs
 *   - Single mask bit per IRQ
 *   - Per-IRQ affinity setting
 *   - Automatic masking on event delivery (auto-ack)
 *   - Software triggering (ORed with hw line)
 * - 2 per-CPU IPIs (meant as "self" and "other", but they are
 *   interchangeable if not symmetric)
 * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
 *   higher priority)
 * - Automatic masking on ack
 * - Default "this CPU" register view and explicit per-CPU views
 *
 * In addition, this driver also handles FIQs, as these are routed to the same
 * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
 * performance counters (TODO).
 *
 * Implementation notes:
 *
 * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
 *   and one for IPIs.
 * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
 *   and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
 * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
 * - DT bindings use 3-cell form (like GIC):
 *   - <0 nr flags> - hwirq #nr
 *   - <1 nr flags> - FIQ #nr
 *     - nr=0  Physical HV timer
 *     - nr=1  Virtual HV timer
 *     - nr=2  Physical guest timer
 *     - nr=3  Virtual guest timer
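 *
 * Example specifiers as a consumer would write them (illustrative only,
 * using the dt-binding defines included below):
 *   <AIC_IRQ 123 IRQ_TYPE_LEVEL_HIGH>             - hardware IRQ 123
 *   <AIC_FIQ AIC_TMR_HV_VIRT IRQ_TYPE_LEVEL_HIGH> - virtual HV timer FIQ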
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-vgic-info.h>
#include <linux/irqdomain.h>
#include <linux/jump_label.h>
#include <linux/limits.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/apple_m1_pmu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <dt-bindings/interrupt-controller/apple-aic.h>

/*
 * AIC v1 registers (MMIO)
 */

#define AIC_INFO		0x0004
#define AIC_INFO_NR_IRQ		GENMASK(15, 0)

#define AIC_CONFIG		0x0010

#define AIC_WHOAMI		0x2000
#define AIC_EVENT		0x2004
#define AIC_EVENT_DIE		GENMASK(31, 24)
#define AIC_EVENT_TYPE		GENMASK(23, 16)
#define AIC_EVENT_NUM		GENMASK(15, 0)

#define AIC_EVENT_TYPE_FIQ	0 /* Software use */
#define AIC_EVENT_TYPE_IRQ	1
#define AIC_EVENT_TYPE_IPI	4
#define AIC_EVENT_IPI_OTHER	1
#define AIC_EVENT_IPI_SELF	2

#define AIC_IPI_SEND		0x2008
#define AIC_IPI_ACK		0x200c
#define AIC_IPI_MASK_SET	0x2024
#define AIC_IPI_MASK_CLR	0x2028

#define AIC_IPI_SEND_CPU(cpu)	BIT(cpu)

#define AIC_IPI_OTHER		BIT(0)
#define AIC_IPI_SELF		BIT(31)

#define AIC_TARGET_CPU		0x3000

#define AIC_CPU_IPI_SET(cpu)	(0x5008 + ((cpu) << 7))
#define AIC_CPU_IPI_CLR(cpu)	(0x500c + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
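
/*
 * The explicit per-CPU register views are spaced 0x80 bytes apart (hence
 * the (cpu) << 7 above) and mirror the sub-offsets of the "this CPU"
 * view at 0x2000 (e.g. IPI_SET at +0x08, IPI_MASK_SET at +0x24).
 */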

#define AIC_MAX_IRQ		0x400

/*
 * AIC v2 registers (MMIO)
 */

#define AIC2_VERSION		0x0000
#define AIC2_VERSION_VER	GENMASK(7, 0)

#define AIC2_INFO1		0x0004
#define AIC2_INFO1_NR_IRQ	GENMASK(15, 0)
#define AIC2_INFO1_LAST_DIE	GENMASK(27, 24)

#define AIC2_INFO2		0x0008

#define AIC2_INFO3		0x000c
#define AIC2_INFO3_MAX_IRQ	GENMASK(15, 0)
#define AIC2_INFO3_MAX_DIE	GENMASK(27, 24)

#define AIC2_RESET		0x0010
#define AIC2_RESET_RESET	BIT(0)

#define AIC2_CONFIG		0x0014
#define AIC2_CONFIG_ENABLE	BIT(0)
#define AIC2_CONFIG_PREFER_PCPU	BIT(28)

#define AIC2_TIMEOUT		0x0028
#define AIC2_CLUSTER_PRIO	0x0030
#define AIC2_DELAY_GROUPS	0x0100

#define AIC2_IRQ_CFG		0x2000

/*
 * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
 *
 * Repeat for each die:
 *   IRQ_CFG:  u32 * MAX_IRQS
 *   SW_SET:   u32 * (MAX_IRQS / 32)
 *   SW_CLR:   u32 * (MAX_IRQS / 32)
 *   MASK_SET: u32 * (MAX_IRQS / 32)
 *   MASK_CLR: u32 * (MAX_IRQS / 32)
 *   HW_STATE: u32 * (MAX_IRQS / 32)
 *
 * This is followed by a set of event registers, each 16K page aligned.
 * The first one is the AP event register we will use. Unfortunately,
 * the actual implemented die count is not specified anywhere in the
 * capability registers, so we have to explicitly specify the event
 * register as a second reg entry in the device tree to remain
 * forward-compatible.
 */

#define AIC2_IRQ_CFG_TARGET	GENMASK(3, 0)
#define AIC2_IRQ_CFG_DELAY_IDX	GENMASK(7, 5)

#define MASK_REG(x)		(4 * ((x) >> 5))
#define MASK_BIT(x)		BIT((x) & GENMASK(4, 0))
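
/*
 * Worked example: for hwirq 35, MASK_REG(35) = 4 * (35 >> 5) = 4 (the
 * second 32-bit word of the bitmap) and MASK_BIT(35) = BIT(35 & 31) =
 * BIT(3).
 */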

/*
 * IMP-DEF sysregs that control FIQ sources
 */

/* IPI request registers */
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1	sys_reg(3, 5, 15, 0, 0)
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1	sys_reg(3, 5, 15, 0, 1)
#define IPI_RR_CPU			GENMASK(7, 0)
/* Cluster only used for the GLOBAL register */
#define IPI_RR_CLUSTER			GENMASK(23, 16)
#define IPI_RR_TYPE			GENMASK(29, 28)
#define IPI_RR_IMMEDIATE		0
#define IPI_RR_RETRACT			1
#define IPI_RR_DEFERRED			2
#define IPI_RR_NOWAKE			3
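
/*
 * Note: this driver only ever sends IMMEDIATE IPIs; aic_ipi_send_fast()
 * below leaves IPI_RR_TYPE as 0, which selects IPI_RR_IMMEDIATE. The
 * other delivery types are listed for completeness.
 */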

/* IPI status register */
#define SYS_IMP_APL_IPI_SR_EL1		sys_reg(3, 5, 15, 1, 1)
#define IPI_SR_PENDING			BIT(0)

/* Guest timer FIQ enable register */
#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2	sys_reg(3, 5, 15, 1, 3)
#define VM_TMR_FIQ_ENABLE_V		BIT(0)
#define VM_TMR_FIQ_ENABLE_P		BIT(1)

/* Deferred IPI countdown register */
#define SYS_IMP_APL_IPI_CR_EL1		sys_reg(3, 5, 15, 3, 1)

/* Uncore PMC control register */
#define SYS_IMP_APL_UPMCR0_EL1		sys_reg(3, 7, 15, 0, 4)
#define UPMCR0_IMODE			GENMASK(18, 16)
#define UPMCR0_IMODE_OFF		0
#define UPMCR0_IMODE_AIC		2
#define UPMCR0_IMODE_HALT		3
#define UPMCR0_IMODE_FIQ		4

/* Uncore PMC status register */
#define SYS_IMP_APL_UPMSR_EL1		sys_reg(3, 7, 15, 6, 4)
#define UPMSR_IACT			BIT(0)

/* MPIDR fields */
#define MPIDR_CPU(x)			MPIDR_AFFINITY_LEVEL(x, 0)
#define MPIDR_CLUSTER(x)		MPIDR_AFFINITY_LEVEL(x, 1)

#define AIC_IRQ_HWIRQ(die, irq)	(FIELD_PREP(AIC_EVENT_DIE, die) | \
				 FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
				 FIELD_PREP(AIC_EVENT_NUM, irq))
#define AIC_FIQ_HWIRQ(x)	(FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
				 FIELD_PREP(AIC_EVENT_NUM, x))
#define AIC_HWIRQ_IRQ(x)	FIELD_GET(AIC_EVENT_NUM, x)
#define AIC_HWIRQ_DIE(x)	FIELD_GET(AIC_EVENT_DIE, x)
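
/*
 * Example encoding: AIC_IRQ_HWIRQ(1, 5) packs die 1 (bits 31:24), type
 * AIC_EVENT_TYPE_IRQ (bits 23:16) and IRQ number 5 (bits 15:0) into
 * 0x01010005; AIC_HWIRQ_DIE()/AIC_HWIRQ_IRQ() recover the fields.
 */
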
#define AIC_NR_SWIPI		32

/*
 * FIQ hwirq index definitions: FIQ sources use the DT binding defines
 * directly, except that timers are special. At the irqchip level, the
 * two timer types are represented by their access method: _EL0 registers
 * or _EL02 registers. In the DT binding, the timers are represented
 * by their purpose (HV or guest). This mapping is for when the kernel is
 * running at EL2 (with VHE). When the kernel is running at EL1, the
 * mapping differs and aic_irq_domain_translate() performs the remapping.
 */
enum fiq_hwirq {
	/* Must be ordered as in apple-aic.h */
	AIC_TMR_EL0_PHYS	= AIC_TMR_HV_PHYS,
	AIC_TMR_EL0_VIRT	= AIC_TMR_HV_VIRT,
	AIC_TMR_EL02_PHYS	= AIC_TMR_GUEST_PHYS,
	AIC_TMR_EL02_VIRT	= AIC_TMR_GUEST_VIRT,
	AIC_CPU_PMU_Effi	= AIC_CPU_PMU_E,
	AIC_CPU_PMU_Perf	= AIC_CPU_PMU_P,
	/* No need for this to be discovered from DT */
	AIC_VGIC_MI,
	AIC_NR_FIQ
};

static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);

struct aic_info {
	int version;

	/* Register offsets */
	u32 event;
	u32 target_cpu;
	u32 irq_cfg;
	u32 sw_set;
	u32 sw_clr;
	u32 mask_set;
	u32 mask_clr;

	u32 die_stride;

	/* Features */
	bool fast_ipi;
};

static const struct aic_info aic1_info __initconst = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,
};

static const struct aic_info aic1_fipi_info __initconst = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,

	.fast_ipi	= true,
};

static const struct aic_info aic2_info __initconst = {
	.version	= 2,

	.irq_cfg	= AIC2_IRQ_CFG,

	.fast_ipi	= true,
};

static const struct of_device_id aic_info_match[] = {
	{
		.compatible = "apple,t8103-aic",
		.data = &aic1_fipi_info,
	},
	{
		.compatible = "apple,aic",
		.data = &aic1_info,
	},
	{
		.compatible = "apple,aic2",
		.data = &aic2_info,
	},
	{}
};

struct aic_irq_chip {
	void __iomem *base;
	void __iomem *event;
	struct irq_domain *hw_domain;
	struct {
		cpumask_t aff;
	} *fiq_aff[AIC_NR_FIQ];

	int nr_irq;
	int max_irq;
	int nr_die;
	int max_die;

	struct aic_info info;
};

static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);

static struct aic_irq_chip *aic_irqc;

static void aic_handle_ipi(struct pt_regs *regs);

static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
{
	return readl_relaxed(ic->base + reg);
}

static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
{
	writel_relaxed(val, ic->base + reg);
}

/*
 * IRQ irqchip
 */

static void aic_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
	u32 irq = AIC_HWIRQ_IRQ(hwirq);

	aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
	u32 irq = AIC_HWIRQ_IRQ(hwirq);

	aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_eoi(struct irq_data *d)
{
	/*
	 * Reading the interrupt reason automatically acknowledges and masks
	 * the IRQ, so we just unmask it here if needed.
	 */
	if (!irqd_irq_masked(d))
		aic_irq_unmask(d);
}

static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
{
	struct aic_irq_chip *ic = aic_irqc;
	u32 event, type, irq;

	do {
		/*
		 * We cannot use a relaxed read here, as reads from DMA buffers
		 * need to be ordered after the IRQ fires.
		 */
		event = readl(ic->event + ic->info.event);
		type = FIELD_GET(AIC_EVENT_TYPE, event);
		irq = FIELD_GET(AIC_EVENT_NUM, event);

		if (type == AIC_EVENT_TYPE_IRQ)
			generic_handle_domain_irq(aic_irqc->hw_domain, event);
		else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
			aic_handle_ipi(regs);
		else if (event != 0)
			pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
	} while (event);

	/*
	 * vGIC maintenance interrupts end up here too, so we need to check
	 * for them separately. It should however only trigger when NV is
	 * in use, and be cleared when coming back from the handler.
	 */
	if (is_kernel_in_hyp_mode() &&
	    (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
	    read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_VGIC_MI));

		if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
			     read_sysreg_s(SYS_ICH_MISR_EL2))) {
			pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
			sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
		}
	}
}

static int aic_irq_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	int cpu;

	BUG_ON(!ic->info.target_cpu);

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static int aic_irq_set_type(struct irq_data *d, unsigned int type)
{
	/*
	 * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
	 * have a way to find out the type of any given IRQ, so just allow both.
	 */
	return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
}

static struct irq_chip aic_chip = {
	.name = "AIC",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_affinity = aic_irq_set_affinity,
	.irq_set_type = aic_irq_set_type,
};

static struct irq_chip aic2_chip = {
	.name = "AIC2",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_type = aic_irq_set_type,
};

/*
 * FIQ irqchip
 */

static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
	return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
}

static void aic_fiq_set_mask(struct irq_data *d)
{
	/* Only the guest timers have real mask bits, unfortunately. */
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_clear_mask(struct irq_data *d)
{
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_mask(struct irq_data *d)
{
	aic_fiq_set_mask(d);
	__this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_unmask(struct irq_data *d)
{
	aic_fiq_clear_mask(d);
	__this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_eoi(struct irq_data *d)
{
	/* We mask to ack (where we can), so we need to unmask at EOI. */
	if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
		aic_fiq_clear_mask(d);
}

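/*
 * A timer is considered firing when it is enabled, its interrupt is not
 * masked, and its ISTATUS bit is set in the CTL register.
 */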
#define TIMER_FIRING(x)                                                        \
	(((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK |            \
		 ARCH_TIMER_CTRL_IT_STAT)) ==                                  \
	 (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))

static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
{
	/*
	 * It would be really nice if we had a system register that lets us get
	 * the FIQ source state without having to peek down into sources...
	 * but such a register does not seem to exist.
	 *
	 * So, we have these potential sources to test for:
	 *  - Fast IPIs (not yet used)
	 *  - The 4 timers (CNTP, CNTV for each of HV and guest)
	 *  - Per-core PMCs (not yet supported)
	 *  - Per-cluster uncore PMCs (not yet supported)
	 *
	 * Since not dealing with any of these results in a FIQ storm,
	 * we check for everything here, even things we don't support yet.
	 */

	if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
		if (static_branch_likely(&use_fast_ipi)) {
			aic_handle_ipi(regs);
		} else {
			pr_err_ratelimited("Fast IPI fired. Acking.\n");
			write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
		}
	}

	if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));

	if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));

	if (is_kernel_in_hyp_mode()) {
		uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);

		if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));

		if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
	}

	if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
			(FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
		int irq;

		if (cpumask_test_cpu(smp_processor_id(),
				     &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
			irq = AIC_CPU_PMU_P;
		else
			irq = AIC_CPU_PMU_E;
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(irq));
	}

	if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
	    (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
		/* Same story with uncore PMCs */
		pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
		sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
				   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
	}
}

static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
{
	return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
}

static struct irq_chip fiq_chip = {
	.name = "AIC-FIQ",
	.irq_mask = aic_fiq_mask,
	.irq_unmask = aic_fiq_unmask,
	.irq_ack = aic_fiq_set_mask,
	.irq_eoi = aic_fiq_eoi,
	.irq_set_type = aic_fiq_set_type,
};

/*
 * Main IRQ domain
 */

static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
	struct irq_chip *chip = &aic_chip;

	if (ic->info.version == 2)
		chip = &aic2_chip;

	if (type == AIC_EVENT_TYPE_IRQ) {
		irq_domain_set_info(id, irq, hw, chip, id->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	} else {
		int fiq = FIELD_GET(AIC_EVENT_NUM, hw);

		switch (fiq) {
		case AIC_CPU_PMU_P:
		case AIC_CPU_PMU_E:
			irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
			break;
		default:
			irq_set_percpu_devid(irq);
			break;
		}

		irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static int aic_irq_domain_translate(struct irq_domain *id,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 *args;
	u32 die = 0;

	if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
	    !is_of_node(fwspec->fwnode))
		return -EINVAL;

	args = &fwspec->param[1];

	if (fwspec->param_count == 4) {
		die = args[0];
		args++;
	}

	switch (fwspec->param[0]) {
	case AIC_IRQ:
		if (die >= ic->nr_die)
			return -EINVAL;
		if (args[0] >= ic->nr_irq)
			return -EINVAL;
		*hwirq = AIC_IRQ_HWIRQ(die, args[0]);
		break;
	case AIC_FIQ:
		if (die != 0)
			return -EINVAL;
		if (args[0] >= AIC_NR_FIQ)
			return -EINVAL;
		*hwirq = AIC_FIQ_HWIRQ(args[0]);

		/*
		 * In EL1 the non-redirected registers are the guest's,
		 * not EL2's, so remap the hwirqs to match.
		 */
		if (!is_kernel_in_hyp_mode()) {
			switch (args[0]) {
			case AIC_TMR_GUEST_PHYS:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
				break;
			case AIC_TMR_GUEST_VIRT:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
				break;
			case AIC_TMR_HV_PHYS:
			case AIC_TMR_HV_VIRT:
				return -ENOENT;
			default:
				break;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	*type = args[1] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	int i, ret;

	ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops aic_irq_domain_ops = {
	.translate	= aic_irq_domain_translate,
	.alloc		= aic_irq_domain_alloc,
	.free		= aic_irq_domain_free,
};

/*
 * IPI irqchip
 */

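/*
 * Fast IPIs are signalled through IMP-DEF sysregs: the LOCAL request
 * register targets a core within the sender's own cluster, while the
 * GLOBAL register also carries a cluster number and can target any core.
 */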
static void aic_ipi_send_fast(int cpu)
{
	u64 mpidr = cpu_logical_map(cpu);
	u64 my_mpidr = read_cpuid_mpidr();
	u64 cluster = MPIDR_CLUSTER(mpidr);
	u64 idx = MPIDR_CPU(mpidr);

	if (MPIDR_CLUSTER(my_mpidr) == cluster)
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
			       SYS_IMP_APL_IPI_RR_LOCAL_EL1);
	else
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
			       SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
	isb();
}

static void aic_handle_ipi(struct pt_regs *regs)
{
	/*
	 * Ack the IPI. We need to order this after the AIC event read, but
	 * that is enforced by normal MMIO ordering guarantees.
	 *
	 * For the Fast IPI case, this needs to be ordered before the vIPI
	 * handling below, so we need to isb();
	 */
	if (static_branch_likely(&use_fast_ipi)) {
		write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
		isb();
	} else {
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
	}

	ipi_mux_process();

	/*
	 * No ordering needed here; at worst this just changes the timing of
	 * when the next IPI will be delivered.
	 */
	if (!static_branch_likely(&use_fast_ipi))
		aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}

static void aic_ipi_send_single(unsigned int cpu)
{
	if (static_branch_likely(&use_fast_ipi))
		aic_ipi_send_fast(cpu);
	else
		aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
}

static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
	int base_ipi;

	base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single);
	if (WARN_ON(base_ipi <= 0))
		return -ENODEV;

	set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);

	return 0;
}

static int aic_init_cpu(unsigned int cpu)
{
	/* Mask all hard-wired per-CPU IRQ/FIQ sources */

	/* Pending Fast IPI FIQs */
	write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);

	/* Timer FIQs */
	sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
	sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);

	/* EL2-only (VHE mode) IRQ sources */
	if (is_kernel_in_hyp_mode()) {
		/* Guest timers */
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
				   VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);

		/* vGIC maintenance IRQ */
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}

	/* PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
			   FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));

	/* Uncore PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
			   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));

	/* Commit all of the above */
	isb();

	if (aic_irqc->info.version == 1) {
		/*
		 * Make sure the kernel's idea of logical CPU order is the same
		 * as AIC's. If we ever end up with a mismatch here, we will
		 * have to introduce a mapping table similar to what other
		 * irqchip drivers do.
		 */
		WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());

		/*
		 * Always keep IPIs unmasked at the hardware level (except auto-masking
		 * by AIC during processing). We manage masks at the vIPI level.
		 * These registers only exist on AICv1, AICv2 always uses fast IPIs.
		 */
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
		if (static_branch_likely(&use_fast_ipi)) {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
		} else {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
			aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
		}
	}

	/* Initialize the local mask state */
	__this_cpu_write(aic_fiq_unmasked, 0);

	return 0;
}

static struct gic_kvm_info vgic_info __initdata = {
	.type			= GIC_V3,
	.no_maint_irq_mask	= true,
	.no_hw_deactivation	= true,
};

static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
{
	int i, n;
	u32 fiq;

	if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
	    WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
		return;

	n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
	if (WARN_ON(n < 0))
		return;

	ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL);
	if (!ic->fiq_aff[fiq])
		return;

	for (i = 0; i < n; i++) {
		struct device_node *cpu_node;
		u32 cpu_phandle;
		int cpu;

		if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
			continue;

		cpu_node = of_find_node_by_phandle(cpu_phandle);
		if (WARN_ON(!cpu_node))
			continue;

		cpu = of_cpu_node_to_id(cpu_node);
		of_node_put(cpu_node);
		if (WARN_ON(cpu < 0))
			continue;

		cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
	}
}

static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
	int i, die;
	u32 off, start_off;
	void __iomem *regs;
	struct aic_irq_chip *irqc;
	struct device_node *affs;
	const struct of_device_id *match;

	regs = of_iomap(node, 0);
	if (WARN_ON(!regs))
		return -EIO;

	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
	if (!irqc) {
		iounmap(regs);
		return -ENOMEM;
	}

	irqc->base = regs;

	match = of_match_node(aic_info_match, node);
	if (!match)
		goto err_unmap;

	irqc->info = *(struct aic_info *)match->data;

	aic_irqc = irqc;

	switch (irqc->info.version) {
	case 1: {
		u32 info;

		info = aic_ic_read(irqc, AIC_INFO);
		irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
		irqc->max_irq = AIC_MAX_IRQ;
		irqc->nr_die = irqc->max_die = 1;

		off = start_off = irqc->info.target_cpu;
		off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */

		irqc->event = irqc->base;

		break;
	}
	case 2: {
		u32 info1, info3;

		info1 = aic_ic_read(irqc, AIC2_INFO1);
		info3 = aic_ic_read(irqc, AIC2_INFO3);

		irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
		irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
		irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
		irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);

		off = start_off = irqc->info.irq_cfg;
		off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */

		irqc->event = of_iomap(node, 1);
		if (WARN_ON(!irqc->event))
			goto err_unmap;

		break;
	}
	}

	irqc->info.sw_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
	irqc->info.sw_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
	irqc->info.mask_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
	irqc->info.mask_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
	off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */
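
	/*
	 * At this point, off - start_off spans one die's worth of registers
	 * (IRQ_CFG plus the SW_SET/SW_CLR/MASK_SET/MASK_CLR/HW_STATE
	 * bitmaps); it is recorded as die_stride below.
	 */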

	if (irqc->info.fast_ipi)
		static_branch_enable(&use_fast_ipi);
	else
		static_branch_disable(&use_fast_ipi);

	irqc->info.die_stride = off - start_off;

	irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
						 &aic_irq_domain_ops, irqc);
	if (WARN_ON(!irqc->hw_domain))
		goto err_unmap;

	irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);

	if (aic_init_smp(irqc, node))
		goto err_remove_domain;

	affs = of_get_child_by_name(node, "affinities");
	if (affs) {
		struct device_node *chld;

		for_each_child_of_node(affs, chld)
			build_fiq_affinity(irqc, chld);
	}
	of_node_put(affs);

	set_handle_irq(aic_handle_irq);
	set_handle_fiq(aic_handle_fiq);

	off = 0;
	for (die = 0; die < irqc->nr_die; die++) {
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
		if (irqc->info.target_cpu)
			for (i = 0; i < irqc->nr_irq; i++)
				aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
		off += irqc->info.die_stride;
	}

	if (irqc->info.version == 2) {
		u32 config = aic_ic_read(irqc, AIC2_CONFIG);

		config |= AIC2_CONFIG_ENABLE;
		aic_ic_write(irqc, AIC2_CONFIG, config);
	}

	if (!is_kernel_in_hyp_mode())
		pr_info("Kernel running in EL1, mapping interrupts\n");

	if (static_branch_likely(&use_fast_ipi))
		pr_info("Using Fast IPIs\n");

	cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
			  "irqchip/apple-aic/ipi:starting",
			  aic_init_cpu, NULL);

	if (is_kernel_in_hyp_mode()) {
		struct irq_fwspec mi = {
			.fwnode		= of_node_to_fwnode(node),
			.param_count	= 3,
			.param		= {
				[0] = AIC_FIQ, /* This is a lie */
				[1] = AIC_VGIC_MI,
				[2] = IRQ_TYPE_LEVEL_HIGH,
			},
		};

		vgic_info.maint_irq = irq_create_fwspec_mapping(&mi);
		WARN_ON(!vgic_info.maint_irq);
	}

	vgic_set_kvm_info(&vgic_info);

	pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs\n",
		irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die,
		AIC_NR_FIQ, AIC_NR_SWIPI);

	return 0;

err_remove_domain:
	irq_domain_remove(irqc->hw_domain);
err_unmap:
	if (irqc->event && irqc->event != irqc->base)
		iounmap(irqc->event);
	iounmap(irqc->base);
	kfree(irqc);
	return -ENODEV;
}

IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);